Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig12
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/acpi_amba.c3
-rw-r--r--drivers/acpi/acpi_apd.c3
-rw-r--r--drivers/acpi/acpi_video.c83
-rw-r--r--drivers/acpi/acpica/Makefile2
-rw-r--r--drivers/acpi/acpica/acdebug.h10
-rw-r--r--drivers/acpi/acpica/acevents.h3
-rw-r--r--drivers/acpi/acpica/acglobal.h11
-rw-r--r--drivers/acpi/acpica/acinterp.h4
-rw-r--r--drivers/acpi/acpica/aclocal.h62
-rw-r--r--drivers/acpi/acpica/acmacros.h21
-rw-r--r--drivers/acpi/acpica/acnamesp.h5
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h14
-rw-r--r--drivers/acpi/acpica/acresrc.h12
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h58
-rw-r--r--drivers/acpi/acpica/dbcmds.c4
-rw-r--r--drivers/acpi/acpica/dbconvert.c8
-rw-r--r--drivers/acpi/acpica/dbexec.c2
-rw-r--r--drivers/acpi/acpica/dbinput.c19
-rw-r--r--drivers/acpi/acpica/dbnames.c4
-rw-r--r--drivers/acpi/acpica/dbutils.c9
-rw-r--r--drivers/acpi/acpica/dbxface.c4
-rw-r--r--drivers/acpi/acpica/dscontrol.c4
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c2
-rw-r--r--drivers/acpi/acpica/dswload.c4
-rw-r--r--drivers/acpi/acpica/dswload2.c4
-rw-r--r--drivers/acpi/acpica/dswstate.c10
-rw-r--r--drivers/acpi/acpica/evgpe.c4
-rw-r--r--drivers/acpi/acpica/evgpeblk.c4
-rw-r--r--drivers/acpi/acpica/evgpeutil.c4
-rw-r--r--drivers/acpi/acpica/evhandler.c2
-rw-r--r--drivers/acpi/acpica/evmisc.c3
-rw-r--r--drivers/acpi/acpica/evregion.c74
-rw-r--r--drivers/acpi/acpica/evrgnini.c3
-rw-r--r--drivers/acpi/acpica/evxfgpe.c2
-rw-r--r--drivers/acpi/acpica/exconcat.c439
-rw-r--r--drivers/acpi/acpica/exconfig.c4
-rw-r--r--drivers/acpi/acpica/exconvrt.c8
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdump.c15
-rw-r--r--drivers/acpi/acpica/exfield.c4
-rw-r--r--drivers/acpi/acpica/exfldio.c14
-rw-r--r--drivers/acpi/acpica/exmisc.c290
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c8
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exregion.c6
-rw-r--r--drivers/acpi/acpica/exresnte.c4
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c4
-rw-r--r--drivers/acpi/acpica/exstorob.c4
-rw-r--r--drivers/acpi/acpica/exutils.c12
-rw-r--r--drivers/acpi/acpica/hwgpe.c6
-rw-r--r--drivers/acpi/acpica/hwregs.c291
-rw-r--r--drivers/acpi/acpica/hwxface.c11
-rw-r--r--drivers/acpi/acpica/nsaccess.c7
-rw-r--r--drivers/acpi/acpica/nsconvert.c9
-rw-r--r--drivers/acpi/acpica/nsdump.c9
-rw-r--r--drivers/acpi/acpica/nsinit.c76
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c4
-rw-r--r--drivers/acpi/acpica/nsprepkg.c92
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c6
-rw-r--r--drivers/acpi/acpica/nsutils.c8
-rw-r--r--drivers/acpi/acpica/nsxfeval.c113
-rw-r--r--drivers/acpi/acpica/nsxfname.c6
-rw-r--r--drivers/acpi/acpica/nsxfobj.c6
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psopinfo.c2
-rw-r--r--drivers/acpi/acpica/psparse.c4
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/rscalc.c90
-rw-r--r--drivers/acpi/acpica/rscreate.c2
-rw-r--r--drivers/acpi/acpica/rsdump.c50
-rw-r--r--drivers/acpi/acpica/rsdumpinfo.c9
-rw-r--r--drivers/acpi/acpica/rsmisc.c2
-rw-r--r--drivers/acpi/acpica/rsserial.c21
-rw-r--r--drivers/acpi/acpica/rsutils.c14
-rw-r--r--drivers/acpi/acpica/rsxface.c6
-rw-r--r--drivers/acpi/acpica/tbdata.c15
-rw-r--r--drivers/acpi/acpica/tbfadt.c28
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c6
-rw-r--r--drivers/acpi/acpica/tbutils.c33
-rw-r--r--drivers/acpi/acpica/tbxface.c6
-rw-r--r--drivers/acpi/acpica/tbxfload.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c8
-rw-r--r--drivers/acpi/acpica/utalloc.c5
-rw-r--r--drivers/acpi/acpica/utascii.c140
-rw-r--r--drivers/acpi/acpica/utbuffer.c24
-rw-r--r--drivers/acpi/acpica/utcache.c7
-rw-r--r--drivers/acpi/acpica/utcopy.c16
-rw-r--r--drivers/acpi/acpica/utdebug.c47
-rw-r--r--drivers/acpi/acpica/utdecode.c30
-rw-r--r--drivers/acpi/acpica/uteval.c4
-rw-r--r--drivers/acpi/acpica/utglobal.c48
-rw-r--r--drivers/acpi/acpica/utids.c8
-rw-r--r--drivers/acpi/acpica/utmath.c4
-rw-r--r--drivers/acpi/acpica/utmisc.c2
-rw-r--r--drivers/acpi/acpica/utnonansi.c67
-rw-r--r--drivers/acpi/acpica/utobject.c18
-rw-r--r--drivers/acpi/acpica/utosi.c4
-rw-r--r--drivers/acpi/acpica/utownerid.c6
-rw-r--r--drivers/acpi/acpica/utprint.c19
-rw-r--r--drivers/acpi/acpica/utstring.c71
-rw-r--r--drivers/acpi/acpica/uttrack.c2
-rw-r--r--drivers/acpi/acpica/utxface.c4
-rw-r--r--drivers/acpi/blacklist.c196
-rw-r--r--drivers/acpi/bus.c39
-rw-r--r--drivers/acpi/device_sysfs.c44
-rw-r--r--drivers/acpi/ec.c241
-rw-r--r--drivers/acpi/evged.c154
-rw-r--r--drivers/acpi/internal.h3
-rw-r--r--drivers/acpi/numa.c16
-rw-r--r--drivers/acpi/osi.c522
-rw-r--r--drivers/acpi/osl.c505
-rw-r--r--drivers/acpi/pci_link.c131
-rw-r--r--drivers/acpi/sleep.c7
-rw-r--r--drivers/acpi/sysfs.c7
-rw-r--r--drivers/acpi/tables.c316
-rw-r--r--drivers/acpi/utils.c6
-rw-r--r--drivers/acpi/video_detect.c2
-rw-r--r--drivers/base/platform.c19
-rw-r--r--drivers/base/power/clock_ops.c2
-rw-r--r--drivers/base/power/domain.c145
-rw-r--r--drivers/base/power/domain_governor.c20
-rw-r--r--drivers/base/power/main.c18
-rw-r--r--drivers/base/power/opp/Makefile1
-rw-r--r--drivers/base/power/opp/core.c440
-rw-r--r--drivers/base/power/opp/cpu.c199
-rw-r--r--drivers/base/power/opp/of.c591
-rw-r--r--drivers/base/power/opp/opp.h14
-rw-r--r--drivers/base/power/runtime.c9
-rw-r--r--drivers/base/property.c34
-rw-r--r--drivers/base/regmap/regcache-flat.c2
-rw-r--r--drivers/base/regmap/regcache.c2
-rw-r--r--drivers/block/drbd/drbd_main.c2
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c12
-rw-r--r--drivers/block/nbd.c4
-rw-r--r--drivers/block/osdblk.c2
-rw-r--r--drivers/block/ps3disk.c2
-rw-r--r--drivers/block/skd_main.c61
-rw-r--r--drivers/block/virtio_blk.c6
-rw-r--r--drivers/block/xen-blkback/xenbus.c2
-rw-r--r--drivers/block/xen-blkfront.c3
-rw-r--r--drivers/char/hw_random/Kconfig29
-rw-r--r--drivers/char/hw_random/Makefile2
-rw-r--r--drivers/char/hw_random/exynos-rng.c33
-rw-r--r--drivers/char/hw_random/hisi-rng.c126
-rw-r--r--drivers/char/hw_random/ppc4xx-rng.c147
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c65
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c2
-rw-r--r--drivers/clocksource/Kconfig6
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/dw_apb_timer.c1
-rw-r--r--drivers/clocksource/mps2-timer.c275
-rw-r--r--drivers/clocksource/mtk_timer.c2
-rw-r--r--drivers/clocksource/tegra20_timer.c14
-rw-r--r--drivers/cpufreq/Kconfig45
-rw-r--r--drivers/cpufreq/Kconfig.arm9
-rw-r--r--drivers/cpufreq/Kconfig.x861
-rw-r--r--drivers/cpufreq/Makefile4
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c129
-rw-r--r--drivers/cpufreq/arm_big_little.c54
-rw-r--r--drivers/cpufreq/arm_big_little.h4
-rw-r--r--drivers/cpufreq/arm_big_little_dt.c21
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c21
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c94
-rw-r--r--drivers/cpufreq/cpufreq-dt.c22
-rw-r--r--drivers/cpufreq/cpufreq-nforce2.c28
-rw-r--r--drivers/cpufreq/cpufreq.c164
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c25
-rw-r--r--drivers/cpufreq/cpufreq_governor.c271
-rw-r--r--drivers/cpufreq/cpufreq_governor.h46
-rw-r--r--drivers/cpufreq/cpufreq_governor_attr_set.c84
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c29
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c43
-rw-r--r--drivers/cpufreq/e_powersaver.c76
-rw-r--r--drivers/cpufreq/elanfreq.c4
-rw-r--r--drivers/cpufreq/hisi-acpu-cpufreq.c42
-rw-r--r--drivers/cpufreq/ia64-acpi-cpufreq.c10
-rw-r--r--drivers/cpufreq/intel_pstate.c303
-rw-r--r--drivers/cpufreq/longhaul.c86
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c7
-rw-r--r--drivers/cpufreq/maple-cpufreq.c11
-rw-r--r--drivers/cpufreq/mt8173-cpufreq.c14
-rw-r--r--drivers/cpufreq/mvebu-cpufreq.c107
-rw-r--r--drivers/cpufreq/omap-cpufreq.c7
-rw-r--r--drivers/cpufreq/p4-clockmod.c19
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c14
-rw-r--r--drivers/cpufreq/pmac64-cpufreq.c47
-rw-r--r--drivers/cpufreq/powernow-k6.c16
-rw-r--r--drivers/cpufreq/powernow-k7.c70
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c272
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.h2
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq_pmi.c15
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c18
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c9
-rw-r--r--drivers/cpufreq/s3c2412-cpufreq.c15
-rw-r--r--drivers/cpufreq/s3c2440-cpufreq.c6
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq-debugfs.c4
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c59
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c10
-rw-r--r--drivers/cpufreq/sc520_freq.c10
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c47
-rw-r--r--drivers/cpufreq/speedstep-centrino.c6
-rw-r--r--drivers/cpufreq/speedstep-ich.c8
-rw-r--r--drivers/cpufreq/speedstep-lib.c11
-rw-r--r--drivers/cpufreq/speedstep-smi.c7
-rw-r--r--drivers/cpufreq/tegra124-cpufreq.c7
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c4
-rw-r--r--drivers/cpuidle/cpuidle.c14
-rw-r--r--drivers/crypto/Kconfig27
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/amcc/Makefile1
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c7
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h4
-rw-r--r--drivers/crypto/amcc/crypto4xx_reg_def.h1
-rw-r--r--drivers/crypto/amcc/crypto4xx_trng.c131
-rw-r--r--drivers/crypto/amcc/crypto4xx_trng.h34
-rw-r--r--drivers/crypto/caam/jr.c2
-rw-r--r--drivers/crypto/ccp/Kconfig2
-rw-r--r--drivers/crypto/ccp/Makefile6
-rw-r--r--drivers/crypto/ccp/ccp-dev-v3.c13
-rw-r--r--drivers/crypto/ccp/ccp-dev.c2
-rw-r--r--drivers/crypto/ccp/ccp-dev.h49
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c727
-rw-r--r--drivers/crypto/ccp/ccp-ops.c69
-rw-r--r--drivers/crypto/marvell/cesa.c10
-rw-r--r--drivers/crypto/marvell/hash.c3
-rw-r--r--drivers/crypto/marvell/tdma.c5
-rw-r--r--drivers/crypto/mxc-scc.c765
-rw-r--r--drivers/crypto/n2_core.c2
-rw-r--r--drivers/crypto/omap-aes.c62
-rw-r--r--drivers/crypto/omap-des.c165
-rw-r--r--drivers/crypto/omap-sham.c25
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_drv.c4
-rw-r--r--drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c23
-rw-r--r--drivers/crypto/qat/qat_c3xxxvf/adf_drv.c6
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_drv.c4
-rw-r--r--drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c23
-rw-r--r--drivers/crypto/qat/qat_c62xvf/adf_drv.c6
-rw-r--r--drivers/crypto/qat/qat_common/Makefile4
-rw-r--r--drivers/crypto/qat/qat_common/adf_admin.c2
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg_strings.h2
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h28
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c40
-rw-r--r--drivers/crypto/qat/qat_common/adf_init.c15
-rw-r--r--drivers/crypto/qat/qat_common/adf_isr.c4
-rw-r--r--drivers/crypto/qat/qat_common/adf_sriov.c8
-rw-r--r--drivers/crypto/qat/qat_common/adf_vf2pf_msg.c92
-rw-r--r--drivers/crypto/qat/qat_common/adf_vf_isr.c61
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c4
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_drv.c4
-rw-r--r--drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c23
-rw-r--r--drivers/crypto/qat/qat_dh895xccvf/adf_drv.c6
-rw-r--r--drivers/crypto/s5p-sss.c368
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-cipher.c10
-rw-r--r--drivers/crypto/talitos.c64
-rw-r--r--drivers/crypto/vmx/ppc-xlate.pl20
-rw-r--r--drivers/devfreq/Kconfig36
-rw-r--r--drivers/devfreq/Makefile4
-rw-r--r--drivers/devfreq/devfreq-event.c5
-rw-r--r--drivers/devfreq/devfreq.c207
-rw-r--r--drivers/devfreq/event/Kconfig8
-rw-r--r--drivers/devfreq/event/Makefile2
-rw-r--r--drivers/devfreq/event/exynos-nocp.c304
-rw-r--r--drivers/devfreq/event/exynos-nocp.h78
-rw-r--r--drivers/devfreq/exynos-bus.c570
-rw-r--r--drivers/devfreq/exynos/Makefile3
-rw-r--r--drivers/devfreq/exynos/exynos4_bus.c1055
-rw-r--r--drivers/devfreq/exynos/exynos4_bus.h110
-rw-r--r--drivers/devfreq/exynos/exynos5_bus.c431
-rw-r--r--drivers/devfreq/exynos/exynos_ppmu.c119
-rw-r--r--drivers/devfreq/exynos/exynos_ppmu.h86
-rw-r--r--drivers/devfreq/governor_passive.c205
-rw-r--r--drivers/edac/Kconfig5
-rw-r--r--drivers/edac/altera_edac.c412
-rw-r--r--drivers/edac/altera_edac.h128
-rw-r--r--drivers/edac/amd64_edac.c129
-rw-r--r--drivers/edac/amd64_edac.h2
-rw-r--r--drivers/edac/edac_mc.c2
-rw-r--r--drivers/edac/edac_mc_sysfs.c3
-rw-r--r--drivers/edac/i7core_edac.c81
-rw-r--r--drivers/edac/ie31200_edac.c121
-rw-r--r--drivers/edac/mce_amd.c9
-rw-r--r--drivers/edac/sb_edac.c222
-rw-r--r--drivers/firmware/efi/Kconfig25
-rw-r--r--drivers/firmware/efi/Makefile5
-rw-r--r--drivers/firmware/efi/arm-init.c104
-rw-r--r--drivers/firmware/efi/arm-runtime.c45
-rw-r--r--drivers/firmware/efi/capsule-loader.c343
-rw-r--r--drivers/firmware/efi/capsule.c308
-rw-r--r--drivers/firmware/efi/efi.c48
-rw-r--r--drivers/firmware/efi/efibc.c113
-rw-r--r--drivers/firmware/efi/efivars.c5
-rw-r--r--drivers/firmware/efi/fake_mem.c43
-rw-r--r--drivers/firmware/efi/libstub/Makefile2
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c77
-rw-r--r--drivers/firmware/efi/libstub/arm32-stub.c37
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c15
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c6
-rw-r--r--drivers/firmware/efi/libstub/fdt.c24
-rw-r--r--drivers/firmware/efi/libstub/gop.c354
-rw-r--r--drivers/firmware/efi/memattr.c182
-rw-r--r--drivers/firmware/efi/reboot.c12
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c60
-rw-r--r--drivers/firmware/efi/vars.c56
-rw-r--r--drivers/firmware/psci.c2
-rw-r--r--drivers/gpu/drm/drm_cache.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c2
-rw-r--r--drivers/hwmon/Kconfig15
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/ads7828.c10
-rw-r--r--drivers/hwmon/fam15h_power.c215
-rw-r--r--drivers/hwmon/it87.c2261
-rw-r--r--drivers/hwmon/max31722.c165
-rw-r--r--drivers/hwmon/sch5636.c2
-rw-r--r--drivers/ide/ide-disk.c6
-rw-r--r--drivers/idle/intel_idle.c137
-rw-r--r--drivers/input/joystick/analog.c6
-rw-r--r--drivers/iommu/irq_remapping.c2
-rw-r--r--drivers/irqchip/Kconfig9
-rw-r--r--drivers/irqchip/Makefile3
-rw-r--r--drivers/irqchip/irq-alpine-msi.c2
-rw-r--r--drivers/irqchip/irq-bcm2836.c10
-rw-r--r--drivers/irqchip/irq-crossbar.c2
-rw-r--r--drivers/irqchip/irq-gic-common.c20
-rw-r--r--drivers/irqchip/irq-gic-v2m.c19
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c42
-rw-r--r--drivers/irqchip/irq-gic-v3.c195
-rw-r--r--drivers/irqchip/irq-gic.c327
-rw-r--r--drivers/irqchip/irq-lpc32xx.c238
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c240
-rw-r--r--drivers/irqchip/irq-mbigen.c4
-rw-r--r--drivers/irqchip/irq-partition-percpu.c256
-rw-r--r--drivers/irqchip/irq-tegra.c2
-rw-r--r--drivers/leds/Kconfig5
-rw-r--r--drivers/leds/led-triggers.c2
-rw-r--r--drivers/leds/leds-gpio.c4
-rw-r--r--drivers/leds/leds-ss4200.c13
-rw-r--r--drivers/leds/leds-tca6507.c2
-rw-r--r--drivers/leds/leds.h1
-rw-r--r--drivers/leds/trigger/Kconfig18
-rw-r--r--drivers/leds/trigger/Makefile2
-rw-r--r--drivers/leds/trigger/ledtrig-ide-disk.c3
-rw-r--r--drivers/leds/trigger/ledtrig-mtd.c45
-rw-r--r--drivers/leds/trigger/ledtrig-panic.c77
-rw-r--r--drivers/lguest/x86/core.c2
-rw-r--r--drivers/lightnvm/core.c370
-rw-r--r--drivers/lightnvm/gennvm.c100
-rw-r--r--drivers/lightnvm/rrpc.c42
-rw-r--r--drivers/lightnvm/rrpc.h2
-rw-r--r--drivers/lightnvm/sysblk.c284
-rw-r--r--drivers/md/bcache/super.c2
-rw-r--r--drivers/md/dm-ioctl.c2
-rw-r--r--drivers/md/dm-mpath.c351
-rw-r--r--drivers/md/dm-raid.c7
-rw-r--r--drivers/md/dm-table.c20
-rw-r--r--drivers/md/dm-thin.c165
-rw-r--r--drivers/md/dm.c10
-rw-r--r--drivers/md/md.c2
-rw-r--r--drivers/md/raid5-cache.c3
-rw-r--r--drivers/mfd/intel-lpss-acpi.c12
-rw-r--r--drivers/mfd/intel-lpss-pci.c20
-rw-r--r--drivers/mfd/intel-lpss.c2
-rw-r--r--drivers/mfd/intel-lpss.h4
-rw-r--r--drivers/mfd/mfd-core.c4
-rw-r--r--drivers/misc/sgi-gru/grukservices.c38
-rw-r--r--drivers/mmc/card/block.c47
-rw-r--r--drivers/mmc/core/Kconfig21
-rw-r--r--drivers/mmc/core/Makefile4
-rw-r--r--drivers/mmc/core/core.c12
-rw-r--r--drivers/mmc/core/host.c22
-rw-r--r--drivers/mmc/core/mmc.c64
-rw-r--r--drivers/mmc/core/pwrseq.c108
-rw-r--r--drivers/mmc/core/pwrseq.h19
-rw-r--r--drivers/mmc/core/pwrseq_emmc.c81
-rw-r--r--drivers/mmc/core/pwrseq_simple.c91
-rw-r--r--drivers/mmc/core/sdio_cis.c7
-rw-r--r--drivers/mmc/host/Kconfig4
-rw-r--r--drivers/mmc/host/atmel-mci.c9
-rw-r--r--drivers/mmc/host/davinci_mmc.c151
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c23
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c43
-rw-r--r--drivers/mmc/host/dw_mmc.c11
-rw-r--r--drivers/mmc/host/dw_mmc.h2
-rw-r--r--drivers/mmc/host/mmci.c20
-rw-r--r--drivers/mmc/host/mtk-sd.c19
-rw-r--r--drivers/mmc/host/omap.c48
-rw-r--r--drivers/mmc/host/omap_hsmmc.c90
-rw-r--r--drivers/mmc/host/sdhci-acpi.c9
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c26
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c74
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c9
-rw-r--r--drivers/mmc/host/sdhci-pic32.c1
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c42
-rw-r--r--drivers/mmc/host/sdhci.c320
-rw-r--r--drivers/mmc/host/sdhci.h10
-rw-r--r--drivers/mmc/host/sh_mmcif.c74
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c194
-rw-r--r--drivers/mmc/host/tmio_mmc.h75
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c1
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c196
-rw-r--r--drivers/mmc/host/toshsd.c1
-rw-r--r--drivers/mmc/host/usdhi6rol0.c60
-rw-r--r--drivers/mtd/mtd_blkdevs.c2
-rw-r--r--drivers/mtd/mtdcore.c19
-rw-r--r--drivers/mtd/nand/nand_base.c29
-rw-r--r--drivers/net/hamradio/baycom_epp.c8
-rw-r--r--drivers/nvme/host/Kconfig2
-rw-r--r--drivers/nvme/host/core.c272
-rw-r--r--drivers/nvme/host/lightnvm.c82
-rw-r--r--drivers/nvme/host/nvme.h91
-rw-r--r--drivers/nvme/host/pci.c249
-rw-r--r--drivers/of/Kconfig3
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/of_numa.c211
-rw-r--r--drivers/perf/arm_pmu.c8
-rw-r--r--drivers/platform/x86/acer-wmi.c16
-rw-r--r--drivers/platform/x86/eeepc-wmi.c24
-rw-r--r--drivers/pnp/pnpbios/core.c3
-rw-r--r--drivers/power/avs/rockchip-io-domain.c10
-rw-r--r--drivers/powercap/intel_rapl.c71
-rw-r--r--drivers/pwm/core.c21
-rw-r--r--drivers/pwm/pwm-clps711x.c2
-rw-r--r--drivers/pwm/pwm-pxa.c2
-rw-r--r--drivers/regulator/Kconfig17
-rw-r--r--drivers/regulator/Makefile8
-rw-r--r--drivers/regulator/act8865-regulator.c113
-rw-r--r--drivers/regulator/as3722-regulator.c65
-rw-r--r--drivers/regulator/core.c279
-rw-r--r--drivers/regulator/fan53555.c24
-rw-r--r--drivers/regulator/helpers.c2
-rw-r--r--drivers/regulator/lp3971.c2
-rw-r--r--drivers/regulator/lp3972.c2
-rw-r--r--drivers/regulator/lp873x-regulator.c241
-rw-r--r--drivers/regulator/max14577-regulator.c (renamed from drivers/regulator/max14577.c)0
-rw-r--r--drivers/regulator/max77620-regulator.c83
-rw-r--r--drivers/regulator/max77686-regulator.c17
-rw-r--r--drivers/regulator/max77693-regulator.c (renamed from drivers/regulator/max77693.c)0
-rw-r--r--drivers/regulator/max77802-regulator.c2
-rw-r--r--drivers/regulator/max8973-regulator.c97
-rw-r--r--drivers/regulator/max8997-regulator.c (renamed from drivers/regulator/max8997.c)2
-rw-r--r--drivers/regulator/of_regulator.c6
-rw-r--r--drivers/regulator/palmas-regulator.c67
-rw-r--r--drivers/regulator/pv88080-regulator.c419
-rw-r--r--drivers/regulator/pv88080-regulator.h92
-rw-r--r--drivers/regulator/pwm-regulator.c78
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c275
-rw-r--r--drivers/regulator/rk808-regulator.c310
-rw-r--r--drivers/regulator/s2mps11.c13
-rw-r--r--drivers/regulator/tps6524x-regulator.c2
-rw-r--r--drivers/regulator/twl-regulator.c106
-rw-r--r--drivers/scsi/sd.c10
-rw-r--r--drivers/soc/qcom/spm.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c6
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c2
-rw-r--r--drivers/target/target_core_iblock.c6
-rw-r--r--drivers/thermal/Kconfig28
-rw-r--r--drivers/thermal/int340x_thermal/Kconfig42
-rw-r--r--drivers/thermal/int340x_thermal/Makefile1
-rw-r--r--drivers/thermal/int340x_thermal/int3406_thermal.c236
-rw-r--r--drivers/video/backlight/backlight.c39
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/efifb.c21
-rw-r--r--drivers/xen/efi.c1
481 files changed, 20077 insertions, 10440 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 82b96ee8624c..b7e2e776397d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -5,10 +5,10 @@
menuconfig ACPI
bool "ACPI (Advanced Configuration and Power Interface) Support"
depends on !IA64_HP_SIM
- depends on IA64 || X86 || (ARM64 && EXPERT)
+ depends on IA64 || X86 || ARM64
depends on PCI
select PNP
- default y
+ default y if (IA64 || X86)
help
Advanced Configuration and Power Interface (ACPI) support for
Linux requires an ACPI-compliant platform (hardware/firmware),
@@ -311,12 +311,12 @@ config ACPI_CUSTOM_DSDT
bool
default ACPI_CUSTOM_DSDT_FILE != ""
-config ACPI_INITRD_TABLE_OVERRIDE
- bool "ACPI tables override via initrd"
+config ACPI_TABLE_UPGRADE
+ bool "Allow upgrading ACPI tables via initrd"
depends on BLK_DEV_INITRD && X86
- default n
+ default y
help
- This option provides functionality to override arbitrary ACPI tables
+ This option provides functionality to upgrade arbitrary ACPI tables
via initrd. No functional change if no ACPI tables are passed via
initrd, therefore it's safe to say Y.
See Documentation/acpi/initrd_table_override.txt for details
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index edeb2d1d99be..251ce85a66fb 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_ACPI) += acpi.o \
acpica/
# All the builtin files are in the "acpi." module_param namespace.
-acpi-y += osl.o utils.o reboot.o
+acpi-y += osi.o osl.o utils.o reboot.o
acpi-y += nvs.o
# Power management related files
@@ -47,6 +47,7 @@ acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
acpi-y += int340x_thermal.o
acpi-y += power.o
acpi-y += event.o
+acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o
acpi-y += sysfs.o
acpi-y += property.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
diff --git a/drivers/acpi/acpi_amba.c b/drivers/acpi/acpi_amba.c
index 2a61b54ab968..7f77c071709a 100644
--- a/drivers/acpi/acpi_amba.c
+++ b/drivers/acpi/acpi_amba.c
@@ -35,8 +35,7 @@ static void amba_register_dummy_clk(void)
if (amba_dummy_clk)
return;
- amba_dummy_clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL,
- CLK_IS_ROOT, 0);
+ amba_dummy_clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 0);
clk_register_clkdev(amba_dummy_clk, "apb_pclk", NULL);
}
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index f245bf35bedb..1daf9c46df8e 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -62,8 +62,7 @@ static int acpi_apd_setup(struct apd_private_data *pdata)
if (dev_desc->fixed_clk_rate) {
clk = clk_register_fixed_rate(&pdata->adev->dev,
dev_name(&pdata->adev->dev),
- NULL, CLK_IS_ROOT,
- dev_desc->fixed_clk_rate);
+ NULL, 0, dev_desc->fixed_clk_rate);
clk_register_clkdev(clk, NULL, dev_name(&pdata->adev->dev));
pdata->clk = clk;
}
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 4361bc98ef4c..3d5b8a099351 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -191,19 +191,6 @@ struct acpi_video_device_cap {
u8 _DDC:1; /* Return the EDID for this device */
};
-struct acpi_video_brightness_flags {
- u8 _BCL_no_ac_battery_levels:1; /* no AC/Battery levels in _BCL */
- u8 _BCL_reversed:1; /* _BCL package is in a reversed order */
- u8 _BQC_use_index:1; /* _BQC returns an index value */
-};
-
-struct acpi_video_device_brightness {
- int curr;
- int count;
- int *levels;
- struct acpi_video_brightness_flags flags;
-};
-
struct acpi_video_device {
unsigned long device_id;
struct acpi_video_device_flags flags;
@@ -325,7 +312,7 @@ static const struct thermal_cooling_device_ops video_cooling_ops = {
*/
static int
-acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
+acpi_video_device_lcd_query_levels(acpi_handle handle,
union acpi_object **levels)
{
int status;
@@ -335,7 +322,7 @@ acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
*levels = NULL;
- status = acpi_evaluate_object(device->dev->handle, "_BCL", NULL, &buffer);
+ status = acpi_evaluate_object(handle, "_BCL", NULL, &buffer);
if (!ACPI_SUCCESS(status))
return status;
obj = (union acpi_object *)buffer.pointer;
@@ -766,36 +753,28 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
return 0;
}
-
-/*
- * Arg:
- * device : video output device (LCD, CRT, ..)
- *
- * Return Value:
- * Maximum brightness level
- *
- * Allocate and initialize device->brightness.
- */
-
-static int
-acpi_video_init_brightness(struct acpi_video_device *device)
+int acpi_video_get_levels(struct acpi_device *device,
+ struct acpi_video_device_brightness **dev_br)
{
union acpi_object *obj = NULL;
int i, max_level = 0, count = 0, level_ac_battery = 0;
- unsigned long long level, level_old;
union acpi_object *o;
struct acpi_video_device_brightness *br = NULL;
- int result = -EINVAL;
+ int result = 0;
u32 value;
- if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
+ if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device->handle,
+ &obj))) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
"LCD brightness level\n"));
+ result = -ENODEV;
goto out;
}
- if (obj->package.count < 2)
+ if (obj->package.count < 2) {
+ result = -EINVAL;
goto out;
+ }
br = kzalloc(sizeof(*br), GFP_KERNEL);
if (!br) {
@@ -861,6 +840,38 @@ acpi_video_init_brightness(struct acpi_video_device *device)
"Found unordered _BCL package"));
br->count = count;
+ *dev_br = br;
+
+out:
+ kfree(obj);
+ return result;
+out_free:
+ kfree(br);
+ goto out;
+}
+EXPORT_SYMBOL(acpi_video_get_levels);
+
+/*
+ * Arg:
+ * device : video output device (LCD, CRT, ..)
+ *
+ * Return Value:
+ * Maximum brightness level
+ *
+ * Allocate and initialize device->brightness.
+ */
+
+static int
+acpi_video_init_brightness(struct acpi_video_device *device)
+{
+ int i, max_level = 0;
+ unsigned long long level, level_old;
+ struct acpi_video_device_brightness *br = NULL;
+ int result = -EINVAL;
+
+ result = acpi_video_get_levels(device->dev, &br);
+ if (result)
+ return result;
device->brightness = br;
/* _BQC uses INDEX while _BCL uses VALUE in some laptops */
@@ -903,17 +914,13 @@ set_level:
goto out_free_levels;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "found %d brightness levels\n", count - 2));
- kfree(obj);
- return result;
+ "found %d brightness levels\n", br->count - 2));
+ return 0;
out_free_levels:
kfree(br->levels);
-out_free:
kfree(br);
-out:
device->brightness = NULL;
- kfree(obj);
return result;
}
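
A quick sketch of how a hypothetical out-of-tree caller might use the newly exported acpi_video_get_levels(); the declaring header, the pr_info reporting, and the assumption that the caller frees br->levels and br afterwards (as the out_free_levels path in this file does) are illustrative guesses, not part of the patch:

#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/video.h>		/* assumed location of the new declaration */

static int example_dump_bcl_levels(struct acpi_device *adev)
{
	struct acpi_video_device_brightness *br;
	int i, ret;

	ret = acpi_video_get_levels(adev, &br);
	if (ret)
		return ret;

	/*
	 * The "count - 2" in the debug message above implies the first two
	 * entries are the special AC/battery levels; the rest are usable.
	 */
	for (i = 2; i < br->count; i++)
		pr_info("_BCL level %d: %d\n", i - 2, br->levels[i]);

	kfree(br->levels);	/* assumed: the caller owns the returned data */
	kfree(br);
	return 0;
}
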
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index f682374c19f4..227bb7bb19d7 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -43,6 +43,7 @@ acpi-y += \
evxfregn.o
acpi-y += \
+ exconcat.o \
exconfig.o \
exconvrt.o \
excreate.o \
@@ -149,6 +150,7 @@ acpi-y += \
acpi-y += \
utaddress.o \
utalloc.o \
+ utascii.o \
utbuffer.o \
utcopy.o \
utexcep.o \
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 993af9eb007a..f6404ea928cb 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -53,7 +53,7 @@
#define ACPI_DEBUG_BUFFER_SIZE 0x4000 /* 16K buffer for return objects */
struct acpi_db_command_info {
- char *name; /* Command Name */
+ const char *name; /* Command Name */
u8 min_args; /* Minimum arguments required */
};
@@ -64,7 +64,7 @@ struct acpi_db_command_help {
};
struct acpi_db_argument_info {
- char *name; /* Argument Name */
+ const char *name; /* Argument Name */
};
struct acpi_db_execute_walk {
@@ -196,7 +196,7 @@ ACPI_DBR_DEPENDENT_RETURN_VOID(void
acpi_walk_state
*walk_state))
- acpi_status acpi_db_display_all_methods(char *display_count_arg);
+acpi_status acpi_db_display_all_methods(char *display_count_arg);
void acpi_db_display_arguments(void);
@@ -220,7 +220,7 @@ ACPI_DBR_DEPENDENT_RETURN_VOID(void
* dbexec - debugger control method execution
*/
void
-acpi_db_execute(char *name, char **args, acpi_object_type * types, u32 flags);
+acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags);
void
acpi_db_create_execution_threads(char *num_threads_arg,
@@ -271,7 +271,7 @@ void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context);
acpi_status acpi_db_user_commands(void);
char *acpi_db_get_next_token(char *string,
- char **next, acpi_object_type * return_type);
+ char **next, acpi_object_type *return_type);
/*
* dbobject
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 010cf81bada9..77af91cf46d4 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -72,6 +72,7 @@ acpi_status acpi_ev_init_global_lock_handler(void);
ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
acpi_ev_acquire_global_lock(u16 timeout))
ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
+
acpi_status acpi_ev_remove_global_lock_handler(void);
/*
@@ -198,8 +199,6 @@ void
acpi_ev_detach_region(union acpi_operand_object *region_obj,
u8 acpi_ns_is_locked);
-void acpi_ev_associate_reg_method(union acpi_operand_object *region_obj);
-
void
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
acpi_adr_space_type space_id, u32 function);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 51b073b68f16..fded776236e2 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -187,6 +187,8 @@ extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT];
extern const char *acpi_gbl_lowest_dstate_names[ACPI_NUM_sx_w_METHODS];
extern const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS];
extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
+extern const char acpi_gbl_lower_hex_digits[];
+extern const char acpi_gbl_upper_hex_digits[];
extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
@@ -361,6 +363,15 @@ ACPI_GLOBAL(u32, acpi_gbl_num_objects);
#endif /* ACPI_DEBUGGER */
+#if defined (ACPI_DISASSEMBLER) || defined (ACPI_ASL_COMPILER)
+
+ACPI_GLOBAL(const char, *acpi_gbl_pld_panel_list[]);
+ACPI_GLOBAL(const char, *acpi_gbl_pld_vertical_position_list[]);
+ACPI_GLOBAL(const char, *acpi_gbl_pld_horizontal_position_list[]);
+ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]);
+
+#endif
+
/*****************************************************************************
*
* Application globals
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index bae1a35c345f..7ead235555cf 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -67,7 +67,7 @@
typedef const struct acpi_exdump_info {
u8 opcode;
u8 offset;
- char *name;
+ const char *name;
} acpi_exdump_info;
@@ -370,7 +370,7 @@ acpi_ex_resolve_to_value(union acpi_operand_object **stack_ptr,
acpi_status
acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
union acpi_operand_object *operand,
- acpi_object_type * return_type,
+ acpi_object_type *return_type,
union acpi_operand_object **return_desc);
/*
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 9562a10a1a18..13331d70dea0 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -278,7 +278,7 @@ struct acpi_create_field_info {
};
typedef
-acpi_status(*acpi_internal_method) (struct acpi_walk_state * walk_state);
+acpi_status (*acpi_internal_method) (struct acpi_walk_state * walk_state);
/*
* Bitmapped ACPI types. Used internally only
@@ -395,11 +395,12 @@ union acpi_predefined_info {
/* Return object auto-repair info */
-typedef acpi_status(*acpi_object_converter) (struct acpi_namespace_node * scope,
- union acpi_operand_object
- *original_object,
- union acpi_operand_object
- **converted_object);
+typedef acpi_status (*acpi_object_converter) (struct acpi_namespace_node *
+ scope,
+ union acpi_operand_object *
+ original_object,
+ union acpi_operand_object **
+ converted_object);
struct acpi_simple_repair_info {
char name[ACPI_NAME_SIZE];
@@ -539,10 +540,10 @@ struct acpi_gpe_device_info {
struct acpi_namespace_node *gpe_device;
};
-typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *
- gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block,
- void *context);
+typedef acpi_status (*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *
+ gpe_xrupt_info,
+ struct acpi_gpe_block_info *
+ gpe_block, void *context);
/* Information about each particular fixed event */
@@ -657,10 +658,11 @@ struct acpi_result_values {
};
typedef
-acpi_status(*acpi_parse_downwards) (struct acpi_walk_state * walk_state,
- union acpi_parse_object ** out_op);
+acpi_status (*acpi_parse_downwards) (struct acpi_walk_state * walk_state,
+ union acpi_parse_object ** out_op);
-typedef acpi_status(*acpi_parse_upwards) (struct acpi_walk_state * walk_state);
+typedef
+acpi_status (*acpi_parse_upwards) (struct acpi_walk_state * walk_state);
/* Global handlers for AML Notifies */
@@ -700,7 +702,8 @@ union acpi_generic_state {
*
****************************************************************************/
-typedef acpi_status(*acpi_execute_op) (struct acpi_walk_state * walk_state);
+typedef
+acpi_status (*acpi_execute_op) (struct acpi_walk_state * walk_state);
/* Address Range info block */
@@ -853,24 +856,24 @@ struct acpi_parse_state {
/* Parse object flags */
-#define ACPI_PARSEOP_GENERIC 0x01
-#define ACPI_PARSEOP_NAMED 0x02
-#define ACPI_PARSEOP_DEFERRED 0x04
-#define ACPI_PARSEOP_BYTELIST 0x08
-#define ACPI_PARSEOP_IN_STACK 0x10
-#define ACPI_PARSEOP_TARGET 0x20
-#define ACPI_PARSEOP_IN_CACHE 0x80
+#define ACPI_PARSEOP_GENERIC 0x01
+#define ACPI_PARSEOP_NAMED_OBJECT 0x02
+#define ACPI_PARSEOP_DEFERRED 0x04
+#define ACPI_PARSEOP_BYTELIST 0x08
+#define ACPI_PARSEOP_IN_STACK 0x10
+#define ACPI_PARSEOP_TARGET 0x20
+#define ACPI_PARSEOP_IN_CACHE 0x80
/* Parse object disasm_flags */
-#define ACPI_PARSEOP_IGNORE 0x01
-#define ACPI_PARSEOP_PARAMLIST 0x02
-#define ACPI_PARSEOP_EMPTY_TERMLIST 0x04
-#define ACPI_PARSEOP_PREDEF_CHECKED 0x08
-#define ACPI_PARSEOP_CLOSING_PAREN 0x10
-#define ACPI_PARSEOP_COMPOUND 0x20
-#define ACPI_PARSEOP_ASSIGNMENT 0x40
-#define ACPI_PARSEOP_ELSEIF 0x80
+#define ACPI_PARSEOP_IGNORE 0x01
+#define ACPI_PARSEOP_PARAMETER_LIST 0x02
+#define ACPI_PARSEOP_EMPTY_TERMLIST 0x04
+#define ACPI_PARSEOP_PREDEFINED_CHECKED 0x08
+#define ACPI_PARSEOP_CLOSING_PAREN 0x10
+#define ACPI_PARSEOP_COMPOUND_ASSIGNMENT 0x20
+#define ACPI_PARSEOP_ASSIGNMENT 0x40
+#define ACPI_PARSEOP_ELSEIF 0x80
/*****************************************************************************
*
@@ -1096,6 +1099,7 @@ struct acpi_external_list {
#define ACPI_EXT_ORIGIN_FROM_FILE 0x02 /* External came from a file */
#define ACPI_EXT_INTERNAL_PATH_ALLOCATED 0x04 /* Deallocate internal path on completion */
#define ACPI_EXT_EXTERNAL_EMITTED 0x08 /* External() statement has been emitted */
+#define ACPI_EXT_ORIGIN_FROM_OPCODE 0x10 /* External came from a External() opcode */
struct acpi_external_file {
char *path;
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 411c18b7d541..a3b95431b7c5 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -260,14 +260,31 @@
#define ACPI_IS_MISALIGNED(value) (((acpi_size) value) & (sizeof(acpi_size)-1))
+/* Generic (power-of-two) rounding */
+
+#define ACPI_IS_ALIGNED(a, s) (((a) & ((s) - 1)) == 0)
+#define ACPI_IS_POWER_OF_TWO(a) ACPI_IS_ALIGNED(a, a)
+
/*
* Bitmask creation
* Bit positions start at zero.
* MASK_BITS_ABOVE creates a mask starting AT the position and above
* MASK_BITS_BELOW creates a mask starting one bit BELOW the position
+ * MASK_BITS_ABOVE/BELOW accepts a bit offset to create a mask
+ * MASK_BITS_ABOVE/BELOW_32/64 accepts a bit width to create a mask
+ * Note: The ACPI_INTEGER_BIT_SIZE check is used to bypass compiler
+ * differences with the shift operator
*/
#define ACPI_MASK_BITS_ABOVE(position) (~((ACPI_UINT64_MAX) << ((u32) (position))))
#define ACPI_MASK_BITS_BELOW(position) ((ACPI_UINT64_MAX) << ((u32) (position)))
+#define ACPI_MASK_BITS_ABOVE_32(width) ((u32) ACPI_MASK_BITS_ABOVE(width))
+#define ACPI_MASK_BITS_BELOW_32(width) ((u32) ACPI_MASK_BITS_BELOW(width))
+#define ACPI_MASK_BITS_ABOVE_64(width) ((width) == ACPI_INTEGER_BIT_SIZE ? \
+ ACPI_UINT64_MAX : \
+ ACPI_MASK_BITS_ABOVE(width))
+#define ACPI_MASK_BITS_BELOW_64(width) ((width) == ACPI_INTEGER_BIT_SIZE ? \
+ (u64) 0 : \
+ ACPI_MASK_BITS_BELOW(width))
/* Bitfields within ACPI registers */
@@ -283,10 +300,10 @@
/* Generic bitfield macros and masks */
#define ACPI_GET_BITS(source_ptr, position, mask) \
- ((*source_ptr >> position) & mask)
+ ((*(source_ptr) >> (position)) & (mask))
#define ACPI_SET_BITS(target_ptr, position, mask, value) \
- (*target_ptr |= ((value & mask) << position))
+ (*(target_ptr) |= (((value) & (mask)) << (position)))
#define ACPI_1BIT_MASK 0x00000001
#define ACPI_2BIT_MASK 0x00000003
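
The width-based ACPI_MASK_BITS_ABOVE/BELOW_32/64 variants exist because shifting a value by its full type width is undefined in C, which is what the ACPI_INTEGER_BIT_SIZE check above works around. A small standalone sketch (plain C with local stand-in macros, not code from this patch) showing the same guard:

#include <stdint.h>
#include <stdio.h>

#define MASK_BITS_ABOVE(pos)	(~(UINT64_MAX << (uint32_t)(pos)))
#define MASK_BITS_BELOW(pos)	(UINT64_MAX << (uint32_t)(pos))

/* Width-based variants: special-case width == 64, where a raw shift is undefined */
#define MASK_BITS_ABOVE_64(w)	((w) == 64 ? UINT64_MAX : MASK_BITS_ABOVE(w))
#define MASK_BITS_BELOW_64(w)	((w) == 64 ? (uint64_t)0 : MASK_BITS_BELOW(w))

int main(void)
{
	uint32_t width;

	width = 12;	/* ~(UINT64_MAX << 12) sets bits 0..11 */
	printf("%016llx\n", (unsigned long long)MASK_BITS_ABOVE_64(width));

	width = 64;	/* full width: the guard avoids shifting by 64 */
	printf("%016llx\n", (unsigned long long)MASK_BITS_ABOVE_64(width));
	printf("%016llx\n", (unsigned long long)MASK_BITS_BELOW_64(width));
	return 0;
}
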
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 022d69cb345a..f33a4ba8e0cb 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -206,9 +206,10 @@ void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth);
void acpi_ns_dump_entry(acpi_handle handle, u32 debug_level);
void
-acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component);
+acpi_ns_dump_pathname(acpi_handle handle,
+ const char *msg, u32 level, u32 component);
-void acpi_ns_print_pathname(u32 num_segments, char *pathname);
+void acpi_ns_print_pathname(u32 num_segments, const char *pathname);
acpi_status
acpi_ns_dump_one_object(acpi_handle obj_handle,
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 7da639d62416..fc305775c3d7 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -139,7 +139,7 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
*/
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode);
-char *acpi_ps_get_opcode_name(u16 opcode);
+const char *acpi_ps_get_opcode_name(u16 opcode);
u8 acpi_ps_get_argument_count(u32 op_type);
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 5faeab41e302..888440b2cf2e 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -129,7 +129,8 @@ enum acpi_return_package_types {
ACPI_PTYPE2_REV_FIXED = 9,
ACPI_PTYPE2_FIX_VAR = 10,
ACPI_PTYPE2_VAR_VAR = 11,
- ACPI_PTYPE2_UUID_PAIR = 12
+ ACPI_PTYPE2_UUID_PAIR = 12,
+ ACPI_PTYPE_CUSTOM = 13
};
/* Support macros for users of the predefined info table */
@@ -340,7 +341,7 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_BIX", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (16 Int),(4 Str) */
- PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,
+ PACKAGE_INFO(ACPI_PTYPE_CUSTOM, ACPI_RTYPE_INTEGER, 16,
ACPI_RTYPE_STRING, 4, 0),
{{"_BLT",
@@ -523,6 +524,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */
PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
+ {{"_FIT", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, /* ACPI 6.0 */
+
{{"_FIX", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Ints) */
PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
@@ -1053,6 +1057,12 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING |
ACPI_RTYPE_BUFFER)}},
+ {{"_WPC", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* ACPI 6.1 */
+
+ {{"_WPP", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* ACPI 6.1 */
+
PACKAGE_INFO(0, 0, 0, 0, 0, 0) /* Table terminator */
};
#else
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 5dd58beafa5c..63da1e37caba 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -124,7 +124,7 @@ typedef enum {
typedef const struct acpi_rsdump_info {
u8 opcode;
u8 offset;
- char *name;
+ const char *name;
const char **pointer;
} acpi_rsdump_info;
@@ -209,7 +209,7 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
acpi_status
acpi_rs_get_method_data(acpi_handle handle,
- char *path, struct acpi_buffer *ret_buffer);
+ const char *path, struct acpi_buffer *ret_buffer);
acpi_status
acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
@@ -223,16 +223,16 @@ acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
* rscalc
*/
acpi_status
-acpi_rs_get_list_length(u8 * aml_buffer,
- u32 aml_buffer_length, acpi_size * size_needed);
+acpi_rs_get_list_length(u8 *aml_buffer,
+ u32 aml_buffer_length, acpi_size *size_needed);
acpi_status
acpi_rs_get_aml_length(struct acpi_resource *resource_list,
- acpi_size resource_list_size, acpi_size * size_needed);
+ acpi_size resource_list_size, acpi_size *size_needed);
acpi_status
acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
- acpi_size * buffer_size_needed);
+ acpi_size *buffer_size_needed);
acpi_status
acpi_rs_convert_aml_to_resources(u8 * aml,
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index b3b386e0b119..6235642e31d3 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -184,7 +184,7 @@ struct acpi_evaluate_info {
/* The first 3 elements are passed by the caller to acpi_ns_evaluate */
struct acpi_namespace_node *prefix_node; /* Input: starting node */
- char *relative_pathname; /* Input: path relative to prefix_node */
+ const char *relative_pathname; /* Input: path relative to prefix_node */
union acpi_operand_object **parameters; /* Input: argument list */
struct acpi_namespace_node *node; /* Resolved node (prefix_node:relative_pathname) */
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 848ad3ac938f..cd5a135fcf29 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -161,8 +161,6 @@ acpi_tb_install_fixed_table(acpi_physical_address address,
acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address);
-u8 acpi_is_valid_signature(char *signature);
-
/*
* tbxfload
*/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index e43ab6f2ad7e..a7dbb2b882cf 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -136,16 +136,16 @@ extern const char *acpi_gbl_pt_decode[];
#define ACPI_SMALL_VARIABLE_LENGTH 3
typedef
-acpi_status(*acpi_walk_aml_callback) (u8 *aml,
- u32 length,
- u32 offset,
- u8 resource_index, void **context);
+acpi_status (*acpi_walk_aml_callback) (u8 *aml,
+ u32 length,
+ u32 offset,
+ u8 resource_index, void **context);
typedef
-acpi_status(*acpi_pkg_callback) (u8 object_type,
- union acpi_operand_object *source_object,
- union acpi_generic_state * state,
- void *context);
+acpi_status (*acpi_pkg_callback) (u8 object_type,
+ union acpi_operand_object * source_object,
+ union acpi_generic_state * state,
+ void *context);
struct acpi_pkg_info {
u8 *free_space;
@@ -167,6 +167,15 @@ struct acpi_pkg_info {
#define DB_QWORD_DISPLAY 8
/*
+ * utascii - ASCII utilities
+ */
+u8 acpi_ut_valid_nameseg(char *signature);
+
+u8 acpi_ut_valid_name_char(char character, u32 position);
+
+void acpi_ut_check_and_repair_ascii(u8 *name, char *repaired_name, u32 count);
+
+/*
* utnonansi - Non-ANSI C library functions
*/
void acpi_ut_strupr(char *src_string);
@@ -175,7 +184,14 @@ void acpi_ut_strlwr(char *src_string);
int acpi_ut_stricmp(char *string1, char *string2);
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
+acpi_status
+acpi_ut_strtoul64(char *string,
+ u32 base, u32 max_integer_byte_width, u64 *ret_integer);
+
+/* Values for max_integer_byte_width above */
+
+#define ACPI_MAX32_BYTE_WIDTH 4
+#define ACPI_MAX64_BYTE_WIDTH 8
/*
* utglobal - Global data structures and procedures
@@ -266,7 +282,8 @@ acpi_ut_trace(u32 line_number,
void
acpi_ut_trace_ptr(u32 line_number,
const char *function_name,
- const char *module_name, u32 component_id, void *pointer);
+ const char *module_name,
+ u32 component_id, const void *pointer);
void
acpi_ut_trace_u32(u32 line_number,
@@ -276,7 +293,8 @@ acpi_ut_trace_u32(u32 line_number,
void
acpi_ut_trace_str(u32 line_number,
const char *function_name,
- const char *module_name, u32 component_id, char *string);
+ const char *module_name,
+ u32 component_id, const char *string);
void
acpi_ut_exit(u32 line_number,
@@ -335,12 +353,12 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list);
*/
acpi_status
acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
- char *path,
+ const char *path,
u32 expected_return_btypes,
union acpi_operand_object **return_desc);
acpi_status
-acpi_ut_evaluate_numeric_object(char *object_name,
+acpi_ut_evaluate_numeric_object(const char *object_name,
struct acpi_namespace_node *device_node,
u64 *value);
@@ -415,7 +433,7 @@ union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size);
union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size);
acpi_status
-acpi_ut_get_object_size(union acpi_operand_object *obj, acpi_size * obj_length);
+acpi_ut_get_object_size(union acpi_operand_object *obj, acpi_size *obj_length);
/*
* utosi - Support for the _OSI predefined control method
@@ -526,15 +544,15 @@ void acpi_ut_set_integer_width(u8 revision);
void
acpi_ut_display_init_pathname(u8 type,
struct acpi_namespace_node *obj_handle,
- char *path);
+ const char *path);
#endif
/*
* utownerid - Support for Table/Method Owner IDs
*/
-acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
+acpi_status acpi_ut_allocate_owner_id(acpi_owner_id *owner_id);
-void acpi_ut_release_owner_id(acpi_owner_id * owner_id);
+void acpi_ut_release_owner_id(acpi_owner_id *owner_id);
/*
* utresrc
@@ -570,10 +588,6 @@ void acpi_ut_print_string(char *string, u16 max_length);
void ut_convert_backslashes(char *pathname);
#endif
-u8 acpi_ut_valid_acpi_name(char *name);
-
-u8 acpi_ut_valid_acpi_char(char character, u32 position);
-
void acpi_ut_repair_name(char *name);
#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
@@ -628,7 +642,7 @@ void acpi_ut_dump_allocation_info(void);
void acpi_ut_dump_allocations(u32 component, const char *module);
acpi_status
-acpi_ut_create_list(char *list_name,
+acpi_ut_create_list(const char *list_name,
u16 object_size, struct acpi_memory_list **return_cache);
#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
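
One note on the acpi_ut_strtoul64() change earlier in this header: the new max_integer_byte_width argument lets callers limit the conversion to 32-bit or 64-bit results (ACPI_MAX32_BYTE_WIDTH / ACPI_MAX64_BYTE_WIDTH) instead of always assuming 64 bits. A hedged illustration of the calling pattern, modeled on the dbconvert.c update later in this patch; the wrapper name is hypothetical:

static acpi_status example_parse_hex_argument(char *string, u64 *value)
{
	/* Honor the revision-dependent integer width of the loaded tables */
	return (acpi_ut_strtoul64
		(string, 16, acpi_gbl_integer_byte_width, value));
}
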
diff --git a/drivers/acpi/acpica/dbcmds.c b/drivers/acpi/acpica/dbcmds.c
index 772178c96ccf..62bd446535f5 100644
--- a/drivers/acpi/acpica/dbcmds.c
+++ b/drivers/acpi/acpica/dbcmds.c
@@ -738,9 +738,9 @@ acpi_dm_test_resource_conversion(struct acpi_namespace_node *node, char *name)
original_aml = return_buffer.pointer;
acpi_dm_compare_aml_resources(original_aml->buffer.pointer,
- (acpi_rsdesc_size) original_aml->buffer.
+ (acpi_rsdesc_size)original_aml->buffer.
length, new_aml.pointer,
- (acpi_rsdesc_size) new_aml.length);
+ (acpi_rsdesc_size)new_aml.length);
/* Cleanup and exit */
diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c
index 68f4e0f4b095..7cd07b27f758 100644
--- a/drivers/acpi/acpica/dbconvert.c
+++ b/drivers/acpi/acpica/dbconvert.c
@@ -194,7 +194,7 @@ acpi_db_convert_to_buffer(char *string, union acpi_object *object)
*
******************************************************************************/
-acpi_status acpi_db_convert_to_package(char *string, union acpi_object * object)
+acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object)
{
char *this;
char *next;
@@ -252,7 +252,7 @@ acpi_status acpi_db_convert_to_package(char *string, union acpi_object * object)
acpi_status
acpi_db_convert_to_object(acpi_object_type type,
- char *string, union acpi_object * object)
+ char *string, union acpi_object *object)
{
acpi_status status = AE_OK;
@@ -277,7 +277,9 @@ acpi_db_convert_to_object(acpi_object_type type,
default:
object->type = ACPI_TYPE_INTEGER;
- status = acpi_ut_strtoul64(string, 16, &object->integer.value);
+ status =
+ acpi_ut_strtoul64(string, 16, acpi_gbl_integer_byte_width,
+ &object->integer.value);
break;
}
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index c814855376e2..12df2915ad74 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -361,7 +361,7 @@ acpi_db_execution_walk(acpi_handle obj_handle,
******************************************************************************/
void
-acpi_db_execute(char *name, char **args, acpi_object_type * types, u32 flags)
+acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags)
{
acpi_status status;
struct acpi_buffer return_obj;
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 417c02a89915..7cd5d2e022da 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -57,12 +57,12 @@ static u32 acpi_db_get_line(char *input_buffer);
static u32 acpi_db_match_command(char *user_command);
-static void acpi_db_display_command_info(char *command, u8 display_all);
+static void acpi_db_display_command_info(const char *command, u8 display_all);
static void acpi_db_display_help(char *command);
static u8
-acpi_db_match_command_help(char *command,
+acpi_db_match_command_help(const char *command,
const struct acpi_db_command_help *help);
/*
@@ -348,7 +348,7 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
******************************************************************************/
static u8
-acpi_db_match_command_help(char *command,
+acpi_db_match_command_help(const char *command,
const struct acpi_db_command_help *help)
{
char *invocation = help->invocation;
@@ -402,7 +402,7 @@ acpi_db_match_command_help(char *command,
*
******************************************************************************/
-static void acpi_db_display_command_info(char *command, u8 display_all)
+static void acpi_db_display_command_info(const char *command, u8 display_all)
{
const struct acpi_db_command_help *next;
u8 matched;
@@ -466,7 +466,7 @@ static void acpi_db_display_help(char *command)
******************************************************************************/
char *acpi_db_get_next_token(char *string,
- char **next, acpi_object_type * return_type)
+ char **next, acpi_object_type *return_type)
{
char *start;
u32 depth;
@@ -656,8 +656,9 @@ static u32 acpi_db_match_command(char *user_command)
}
for (i = CMD_FIRST_VALID; acpi_gbl_db_commands[i].name; i++) {
- if (strstr(acpi_gbl_db_commands[i].name, user_command) ==
- acpi_gbl_db_commands[i].name) {
+ if (strstr
+ (ACPI_CAST_PTR(char, acpi_gbl_db_commands[i].name),
+ user_command) == acpi_gbl_db_commands[i].name) {
return (i);
}
}
@@ -683,8 +684,8 @@ static u32 acpi_db_match_command(char *user_command)
acpi_status
acpi_db_command_dispatch(char *input_buffer,
- struct acpi_walk_state * walk_state,
- union acpi_parse_object * op)
+ struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op)
{
u32 temp;
u32 command_index;
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index 3c23b5a1079b..8667f14d535e 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -285,7 +285,7 @@ void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg)
u32 max_depth = ACPI_UINT32_MAX;
acpi_owner_id owner_id;
- owner_id = (acpi_owner_id) strtoul(owner_arg, NULL, 0);
+ owner_id = (acpi_owner_id)strtoul(owner_arg, NULL, 0);
/* Now we can check for the depth argument */
@@ -709,7 +709,7 @@ acpi_db_integrity_walk(acpi_handle obj_handle,
return (AE_OK);
}
- if (!acpi_ut_valid_acpi_name(node->name.ascii)) {
+ if (!acpi_ut_valid_nameseg(node->name.ascii)) {
acpi_os_printf("Invalid AcpiName for Node %p\n", node);
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/dbutils.c b/drivers/acpi/acpica/dbutils.c
index b37a2c77b86b..ae80106d1000 100644
--- a/drivers/acpi/acpica/dbutils.c
+++ b/drivers/acpi/acpica/dbutils.c
@@ -56,8 +56,6 @@ acpi_status acpi_db_second_pass_parse(union acpi_parse_object *root);
void acpi_db_dump_buffer(u32 address);
#endif
-static char *gbl_hex_to_ascii = "0123456789ABCDEF";
-
/*******************************************************************************
*
* FUNCTION: acpi_db_match_argument
@@ -82,8 +80,9 @@ acpi_db_match_argument(char *user_argument,
}
for (i = 0; arguments[i].name; i++) {
- if (strstr(arguments[i].name, user_argument) ==
- arguments[i].name) {
+ if (strstr(ACPI_CAST_PTR(char, arguments[i].name),
+ ACPI_CAST_PTR(char,
+ user_argument)) == arguments[i].name) {
return (i);
}
}
@@ -339,7 +338,7 @@ void acpi_db_uint32_to_hex_string(u32 value, char *buffer)
buffer[8] = '\0';
for (i = 7; i >= 0; i--) {
- buffer[i] = gbl_hex_to_ascii[value & 0x0F];
+ buffer[i] = acpi_gbl_upper_hex_digits[value & 0x0F];
value = value >> 4;
}
}
diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c
index e94e0d80bc7b..124db237775d 100644
--- a/drivers/acpi/acpica/dbxface.c
+++ b/drivers/acpi/acpica/dbxface.c
@@ -162,8 +162,8 @@ void acpi_db_signal_break_point(struct acpi_walk_state *walk_state)
******************************************************************************/
acpi_status
-acpi_db_single_step(struct acpi_walk_state * walk_state,
- union acpi_parse_object * op, u32 opcode_class)
+acpi_db_single_step(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op, u32 opcode_class)
{
union acpi_parse_object *next;
acpi_status status = AE_OK;
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index c9a663f21ac8..4ddcbf100234 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -163,8 +163,8 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
******************************************************************************/
acpi_status
-acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
- union acpi_parse_object * op)
+acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op)
{
acpi_status status = AE_OK;
union acpi_generic_state *control_state;
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 5aa1c5feee50..f1e6dcc7a827 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -188,7 +188,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
acpi_status
acpi_ds_initialize_objects(u32 table_index,
- struct acpi_namespace_node * start_node)
+ struct acpi_namespace_node *start_node)
{
acpi_status status;
struct acpi_init_walk_info info;
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index da198b864107..47c7b52a519c 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -209,7 +209,7 @@ acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
******************************************************************************/
acpi_status
-acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state)
+acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
u32 aml_offset;
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 8ca9416320e0..f393de9f5887 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -569,7 +569,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
/* TBD: May only be temporary */
obj_desc =
- acpi_ut_create_string_object((acpi_size) name_length);
+ acpi_ut_create_string_object((acpi_size)name_length);
strncpy(obj_desc->string.pointer,
name_string, name_length);
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index d1cedcfda1d2..fd34040d4f44 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -137,8 +137,8 @@ acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number)
******************************************************************************/
acpi_status
-acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
- union acpi_parse_object ** out_op)
+acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **out_op)
{
union acpi_parse_object *op;
struct acpi_namespace_node *node;
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 0bac6e14170e..762db3fa70e0 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -490,8 +490,8 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
status =
acpi_ds_create_index_field(op,
- (acpi_handle) arg->
- common.node, walk_state);
+ (acpi_handle)arg->common.
+ node, walk_state);
break;
case AML_BANK_FIELD_OP:
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 3a26ddbaed6d..e3338698e56b 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -143,8 +143,8 @@ acpi_ds_result_pop(union acpi_operand_object **object,
******************************************************************************/
acpi_status
-acpi_ds_result_push(union acpi_operand_object * object,
- struct acpi_walk_state * walk_state)
+acpi_ds_result_push(union acpi_operand_object *object,
+ struct acpi_walk_state *walk_state)
{
union acpi_generic_state *state;
acpi_status status;
@@ -307,7 +307,7 @@ static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state)
******************************************************************************/
acpi_status
-acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
+acpi_ds_obj_stack_push(void *object, struct acpi_walk_state *walk_state)
{
ACPI_FUNCTION_NAME(ds_obj_stack_push);
@@ -354,7 +354,7 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
******************************************************************************/
acpi_status
-acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
+acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state *walk_state)
{
u32 i;
@@ -411,7 +411,7 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
return;
}
- for (i = (s32) pop_count - 1; i >= 0; i--) {
+ for (i = (s32)pop_count - 1; i >= 0; i--) {
if (walk_state->num_operands == 0) {
return;
}
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index b47e62aaf654..4b4949ce05bc 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -440,7 +440,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
gpe_event_info =
&gpe_block->
- event_info[((acpi_size) i *
+ event_info[((acpi_size)i *
ACPI_GPE_REGISTER_WIDTH) + j];
gpe_number =
j + gpe_register_info->base_gpe_number;
@@ -652,7 +652,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
*
******************************************************************************/
-acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info * gpe_event_info)
+acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 447fa1cac64f..d54014cab01d 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -211,7 +211,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
/* Allocate the GPE register information block */
- gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
+ gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->
register_count *
sizeof(struct
acpi_gpe_register_info));
@@ -225,7 +225,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
* Allocate the GPE event_info block. There are eight distinct GPEs
* per register. Initialization to zeros is sufficient.
*/
- gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->gpe_count *
+ gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->gpe_count *
sizeof(struct
acpi_gpe_event_info));
if (!gpe_event_info) {
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 66c4b5b7cd64..3f150d567e64 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -163,7 +163,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_status
acpi_ev_get_gpe_xrupt_block(u32 interrupt_number,
- struct acpi_gpe_xrupt_info ** gpe_xrupt_block)
+ struct acpi_gpe_xrupt_info **gpe_xrupt_block)
{
struct acpi_gpe_xrupt_info *next_gpe_xrupt;
struct acpi_gpe_xrupt_info *gpe_xrupt;
@@ -320,7 +320,7 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/* Now look at the individual GPEs in this byte register */
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
- gpe_event_info = &gpe_block->event_info[((acpi_size) i *
+ gpe_event_info = &gpe_block->event_info[((acpi_size)i *
ACPI_GPE_REGISTER_WIDTH)
+ j];
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 0f6be8956a99..24768ca03f19 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -359,7 +359,7 @@ union acpi_operand_object *acpi_ev_find_region_handler(acpi_adr_space_type
******************************************************************************/
acpi_status
-acpi_ev_install_space_handler(struct acpi_namespace_node * node,
+acpi_ev_install_space_handler(struct acpi_namespace_node *node,
acpi_adr_space_type space_id,
acpi_adr_space_handler handler,
acpi_adr_space_setup setup, void *context)
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index c67d78c5995f..f51d43adb7d1 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -99,8 +99,7 @@ u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node)
******************************************************************************/
acpi_status
-acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
- u32 notify_value)
+acpi_ev_queue_notify_request(struct acpi_namespace_node *node, u32 notify_value)
{
union acpi_operand_object *obj_desc;
union acpi_operand_object *handler_list_head = NULL;
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 63924d1c737a..4c6f79514040 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -526,81 +526,59 @@ acpi_ev_attach_region(union acpi_operand_object *handler_obj,
/*******************************************************************************
*
- * FUNCTION: acpi_ev_associate_reg_method
+ * FUNCTION: acpi_ev_execute_reg_method
*
* PARAMETERS: region_obj - Region object
+ * function - Passed to _REG: On (1) or Off (0)
*
* RETURN: Status
*
- * DESCRIPTION: Find and associate _REG method to a region
+ * DESCRIPTION: Execute _REG method for a region
*
******************************************************************************/
-void acpi_ev_associate_reg_method(union acpi_operand_object *region_obj)
+acpi_status
+acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
{
- acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG;
+ struct acpi_evaluate_info *info;
+ union acpi_operand_object *args[3];
+ union acpi_operand_object *region_obj2;
+ const acpi_name *reg_name_ptr =
+ ACPI_CAST_PTR(acpi_name, METHOD_NAME__REG);
struct acpi_namespace_node *method_node;
struct acpi_namespace_node *node;
- union acpi_operand_object *region_obj2;
acpi_status status;
- ACPI_FUNCTION_TRACE(ev_associate_reg_method);
+ ACPI_FUNCTION_TRACE(ev_execute_reg_method);
+
+ if (!acpi_gbl_namespace_initialized ||
+ region_obj->region.handler == NULL) {
+ return_ACPI_STATUS(AE_OK);
+ }
region_obj2 = acpi_ns_get_secondary_object(region_obj);
if (!region_obj2) {
- return_VOID;
+ return_ACPI_STATUS(AE_NOT_EXIST);
}
+ /*
+ * Find any "_REG" method associated with this region definition.
+ * The method should always be updated as this function may be
+ * invoked after a namespace change.
+ */
node = region_obj->region.node->parent;
-
- /* Find any "_REG" method associated with this region definition */
-
status =
acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD,
&method_node);
if (ACPI_SUCCESS(status)) {
/*
- * The _REG method is optional and there can be only one per region
- * definition. This will be executed when the handler is attached
- * or removed
+ * The _REG method is optional and there can be only one per
+ * region definition. This will be executed when the handler is
+ * attached or removed.
*/
region_obj2->extra.method_REG = method_node;
}
-
- return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_execute_reg_method
- *
- * PARAMETERS: region_obj - Region object
- * function - Passed to _REG: On (1) or Off (0)
- *
- * RETURN: Status
- *
- * DESCRIPTION: Execute _REG method for a region
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
-{
- struct acpi_evaluate_info *info;
- union acpi_operand_object *args[3];
- union acpi_operand_object *region_obj2;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_execute_reg_method);
-
- region_obj2 = acpi_ns_get_secondary_object(region_obj);
- if (!region_obj2) {
- return_ACPI_STATUS(AE_NOT_EXIST);
- }
-
- if (region_obj2->extra.method_REG == NULL ||
- region_obj->region.handler == NULL ||
- !acpi_gbl_namespace_initialized) {
+ if (region_obj2->extra.method_REG == NULL) {
return_ACPI_STATUS(AE_OK);
}
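
The merged acpi_ev_execute_reg_method() above resolves _REG at execution
time instead of relying on the node cached by the old
acpi_ev_associate_reg_method(), so a namespace change after region
initialization is still honored. A minimal standalone sketch of that
ordering, in plain C with illustrative names only (not ACPICA code):

#include <stdio.h>
#include <string.h>

struct scope {
	const char *methods[4];		/* names defined directly in this scope */
};

/* Stand-in for a one-level scope search (acpi_ns_search_one_scope) */
static int scope_has_method(const struct scope *scope, const char *name)
{
	int i;

	for (i = 0; i < 4 && scope->methods[i]; i++) {
		if (!strcmp(scope->methods[i], name)) {
			return 1;
		}
	}
	return 0;
}

/* Stand-in for evaluating _REG with its On (1) / Off (0) argument */
static int run_reg(int function)
{
	printf("_REG(%d)\n", function);
	return 0;
}

static int execute_reg_method(const struct scope *parent, int has_handler,
			      int function)
{
	if (!has_handler) {
		return 0;	/* no handler attached, nothing to notify */
	}

	/* _REG is resolved fresh on every call, not taken from a cache */

	if (!scope_has_method(parent, "_REG")) {
		return 0;	/* _REG is optional */
	}

	return run_reg(function);
}

int main(void)
{
	struct scope device_scope = { { "_CRS", "_REG", NULL, NULL } };

	return execute_reg_method(&device_scope, 1, 1);
}
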
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index fda869c9ad0b..b6ea9c0d0d8c 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -227,7 +227,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
/* Install a handler for this PCI root bridge */
- status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
+ status = acpi_install_address_space_handler((acpi_handle)pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
if (ACPI_FAILURE(status)) {
if (status == AE_SAME_HANDLER) {
/*
@@ -518,7 +518,6 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
return_ACPI_STATUS(AE_OK);
}
- acpi_ev_associate_reg_method(region_obj);
region_obj->common.flags |= AOPOBJ_OBJECT_INITIALIZED;
node = region_obj->region.node->parent;
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 90456714821f..17cfef721d00 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -917,7 +917,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
* the FADT-defined gpe blocks. Otherwise, the GPE block device.
*
******************************************************************************/
-acpi_status acpi_get_gpe_device(u32 index, acpi_handle * gpe_device)
+acpi_status acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
{
struct acpi_gpe_device_info info;
acpi_status status;
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
new file mode 100644
index 000000000000..2423fe03e879
--- /dev/null
+++ b/drivers/acpi/acpica/exconcat.c
@@ -0,0 +1,439 @@
+/******************************************************************************
+ *
+ * Module Name: exconcat - Concatenate-type AML operators
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2016, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlresrc.h"
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exconcat")
+
+/* Local Prototypes */
+static acpi_status
+acpi_ex_convert_to_object_type_string(union acpi_operand_object *obj_desc,
+ union acpi_operand_object **result_desc);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_do_concatenate
+ *
+ * PARAMETERS: operand0 - First source object
+ * operand1 - Second source object
+ * actual_return_desc - Where to place the return object
+ * walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Concatenate two objects with the ACPI-defined conversion
+ * rules as necessary.
+ * NOTE:
+ * Per the ACPI spec (up to 6.1), Concatenate only supports Integer,
+ * String, and Buffer objects. However, we support all objects here
+ * as an extension. This improves the usefulness of both Concatenate
+ * and the Printf/Fprintf macros. The extension returns a string
+ * describing the object type for the other objects.
+ * 02/2016.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_do_concatenate(union acpi_operand_object *operand0,
+ union acpi_operand_object *operand1,
+ union acpi_operand_object **actual_return_desc,
+ struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object *local_operand0 = operand0;
+ union acpi_operand_object *local_operand1 = operand1;
+ union acpi_operand_object *temp_operand1 = NULL;
+ union acpi_operand_object *return_desc;
+ char *buffer;
+ acpi_object_type operand0_type;
+ acpi_object_type operand1_type;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ex_do_concatenate);
+
+ /* Operand 0 preprocessing */
+
+ switch (operand0->common.type) {
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+
+ operand0_type = operand0->common.type;
+ break;
+
+ default:
+
+ /* For all other types, get the "object type" string */
+
+ status =
+ acpi_ex_convert_to_object_type_string(operand0,
+ &local_operand0);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ operand0_type = ACPI_TYPE_STRING;
+ break;
+ }
+
+ /* Operand 1 preprocessing */
+
+ switch (operand1->common.type) {
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+
+ operand1_type = operand1->common.type;
+ break;
+
+ default:
+
+ /* For all other types, get the "object type" string */
+
+ status =
+ acpi_ex_convert_to_object_type_string(operand1,
+ &local_operand1);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ operand1_type = ACPI_TYPE_STRING;
+ break;
+ }
+
+ /*
+ * Convert the second operand if necessary. The first operand (0)
+ * determines the type of the second operand (1) (See the Data Types
+ * section of the ACPI specification). Both object types are
+ * guaranteed to be either Integer/String/Buffer by the operand
+ * resolution mechanism.
+ */
+ switch (operand0_type) {
+ case ACPI_TYPE_INTEGER:
+
+ status =
+ acpi_ex_convert_to_integer(local_operand1, &temp_operand1,
+ 16);
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ status =
+ acpi_ex_convert_to_buffer(local_operand1, &temp_operand1);
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ switch (operand1_type) {
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+
+ /* Other types have already been converted to string */
+
+ status =
+ acpi_ex_convert_to_string(local_operand1,
+ &temp_operand1,
+ ACPI_IMPLICIT_CONVERT_HEX);
+ break;
+
+ default:
+
+ status = AE_OK;
+ break;
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
+ operand0->common.type));
+ status = AE_AML_INTERNAL;
+ }
+
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Take care with any newly created operand objects */
+
+ if ((local_operand1 != operand1) && (local_operand1 != temp_operand1)) {
+ acpi_ut_remove_reference(local_operand1);
+ }
+
+ local_operand1 = temp_operand1;
+
+ /*
+ * Both operands are now known to be the same object type
+ * (Both are Integer, String, or Buffer), and we can now perform
+ * the concatenation.
+ *
+ * There are three cases to handle, as per the ACPI spec:
+ *
+ * 1) Two Integers concatenated to produce a new Buffer
+ * 2) Two Strings concatenated to produce a new String
+ * 3) Two Buffers concatenated to produce a new Buffer
+ */
+ switch (operand0_type) {
+ case ACPI_TYPE_INTEGER:
+
+ /* Result of two Integers is a Buffer */
+ /* Need enough buffer space for two integers */
+
+ return_desc = acpi_ut_create_buffer_object((acpi_size)
+ ACPI_MUL_2
+ (acpi_gbl_integer_byte_width));
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ buffer = (char *)return_desc->buffer.pointer;
+
+ /* Copy the first integer, LSB first */
+
+ memcpy(buffer, &operand0->integer.value,
+ acpi_gbl_integer_byte_width);
+
+ /* Copy the second integer (LSB first) after the first */
+
+ memcpy(buffer + acpi_gbl_integer_byte_width,
+ &local_operand1->integer.value,
+ acpi_gbl_integer_byte_width);
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ /* Result of two Strings is a String */
+
+ return_desc = acpi_ut_create_string_object(((acpi_size)
+ local_operand0->
+ string.length +
+ local_operand1->
+ string.length));
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ buffer = return_desc->string.pointer;
+
+ /* Concatenate the strings */
+
+ strcpy(buffer, local_operand0->string.pointer);
+ strcat(buffer, local_operand1->string.pointer);
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ /* Result of two Buffers is a Buffer */
+
+ return_desc = acpi_ut_create_buffer_object(((acpi_size)
+ operand0->buffer.
+ length +
+ local_operand1->
+ buffer.length));
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ buffer = (char *)return_desc->buffer.pointer;
+
+ /* Concatenate the buffers */
+
+ memcpy(buffer, operand0->buffer.pointer,
+ operand0->buffer.length);
+ memcpy(buffer + operand0->buffer.length,
+ local_operand1->buffer.pointer,
+ local_operand1->buffer.length);
+ break;
+
+ default:
+
+ /* Invalid object type, should not happen here */
+
+ ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
+ operand0->common.type));
+ status = AE_AML_INTERNAL;
+ goto cleanup;
+ }
+
+ *actual_return_desc = return_desc;
+
+cleanup:
+ if (local_operand0 != operand0) {
+ acpi_ut_remove_reference(local_operand0);
+ }
+
+ if (local_operand1 != operand1) {
+ acpi_ut_remove_reference(local_operand1);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_convert_to_object_type_string
+ *
+ * PARAMETERS: obj_desc - Object to be converted
+ * return_desc - Where to place the return object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert an object of arbitrary type to a string object that
+ * contains the namestring for the object. Used for the
+ * concatenate operator.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_convert_to_object_type_string(union acpi_operand_object *obj_desc,
+ union acpi_operand_object **result_desc)
+{
+ union acpi_operand_object *return_desc;
+ const char *type_string;
+
+ type_string = acpi_ut_get_type_name(obj_desc->common.type);
+
+ return_desc = acpi_ut_create_string_object(((acpi_size)strlen(type_string) + 9)); /* 9 For "[ Object]" */
+ if (!return_desc) {
+ return (AE_NO_MEMORY);
+ }
+
+ strcpy(return_desc->string.pointer, "[");
+ strcat(return_desc->string.pointer, type_string);
+ strcat(return_desc->string.pointer, " Object]");
+
+ *result_desc = return_desc;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_concat_template
+ *
+ * PARAMETERS: operand0 - First source object
+ * operand1 - Second source object
+ * actual_return_desc - Where to place the return object
+ * walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Concatenate two resource templates
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_concat_template(union acpi_operand_object *operand0,
+ union acpi_operand_object *operand1,
+ union acpi_operand_object **actual_return_desc,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ union acpi_operand_object *return_desc;
+ u8 *new_buf;
+ u8 *end_tag;
+ acpi_size length0;
+ acpi_size length1;
+ acpi_size new_length;
+
+ ACPI_FUNCTION_TRACE(ex_concat_template);
+
+ /*
+ * Find the end_tag descriptor in each resource template.
+ * Note1: returned pointers point TO the end_tag, not past it.
+ * Note2: zero-length buffers are allowed; treated like one end_tag
+ */
+
+ /* Get the length of the first resource template */
+
+ status = acpi_ut_get_resource_end_tag(operand0, &end_tag);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ length0 = ACPI_PTR_DIFF(end_tag, operand0->buffer.pointer);
+
+ /* Get the length of the second resource template */
+
+ status = acpi_ut_get_resource_end_tag(operand1, &end_tag);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ length1 = ACPI_PTR_DIFF(end_tag, operand1->buffer.pointer);
+
+ /* Combine both lengths, minimum size will be 2 for end_tag */
+
+ new_length = length0 + length1 + sizeof(struct aml_resource_end_tag);
+
+ /* Create a new buffer object for the result (with one end_tag) */
+
+ return_desc = acpi_ut_create_buffer_object(new_length);
+ if (!return_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /*
+ * Copy the templates to the new buffer, 0 first, then 1 follows. A
+ * single new end_tag descriptor is then written explicitly below.
+ */
+ new_buf = return_desc->buffer.pointer;
+ memcpy(new_buf, operand0->buffer.pointer, length0);
+ memcpy(new_buf + length0, operand1->buffer.pointer, length1);
+
+ /* Insert end_tag and set the checksum to zero, means "ignore checksum" */
+
+ new_buf[new_length - 1] = 0;
+ new_buf[new_length - 2] = ACPI_RESOURCE_NAME_END_TAG | 1;
+
+ /* Return the completed resource template */
+
+ *actual_return_desc = return_desc;
+ return_ACPI_STATUS(AE_OK);
+}
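
The new file above spells out two Concatenate rules: operand 0 selects the
result type (Integer/Integer and Buffer/Buffer produce a Buffer, everything
else a String), and operands that are not Integer, String, or Buffer are
first replaced by a "[<Type> Object]" string. A standalone sketch of both
rules in plain C (illustrative only, not ACPICA code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum obj_type { OBJ_INTEGER, OBJ_STRING, OBJ_BUFFER, OBJ_OTHER };

/* Operand 0 decides the result type; non-Int/Str/Buf behaves as a String */
static enum obj_type concat_result_type(enum obj_type operand0)
{
	switch (operand0) {
	case OBJ_INTEGER:
	case OBJ_BUFFER:
		return OBJ_BUFFER;	/* Int+Int and Buf+Buf give a Buffer */
	default:
		return OBJ_STRING;	/* String and the object-type fallback */
	}
}

/* Mirrors the "[<Type> Object]" string built for unsupported types */
static char *object_type_string(const char *type_name)
{
	size_t length = strlen(type_name) + 9 + 1;	/* "[", " Object]", NUL */
	char *string = malloc(length);

	if (string) {
		snprintf(string, length, "[%s Object]", type_name);
	}
	return string;
}

int main(void)
{
	char *fallback = object_type_string("Package");

	printf("result type %d, fallback \"%s\"\n",
	       concat_result_type(OBJ_OTHER), fallback ? fallback : "-");
	free(fallback);
	return 0;
}
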
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index f74161301037..a1d177d58254 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -118,7 +118,9 @@ acpi_ex_add_table(u32 table_index,
/* Execute any module-level code that was found in the table */
acpi_ex_exit_interpreter();
- acpi_ns_exec_module_code_list();
+ if (acpi_gbl_group_module_level_code) {
+ acpi_ns_exec_module_code_list();
+ }
acpi_ex_enter_interpreter();
/*
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 0b9f2c13b98a..b7e9b3d803e1 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -124,7 +124,9 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
* of ACPI 3.0) is that the to_integer() operator allows both decimal
* and hexadecimal strings (hex prefixed with "0x").
*/
- status = acpi_ut_strtoul64((char *)pointer, flags, &result);
+ status = acpi_ut_strtoul64((char *)pointer, flags,
+ acpi_gbl_integer_byte_width,
+ &result);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -439,7 +441,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
* Need enough space for one ASCII integer (plus null terminator)
*/
return_desc =
- acpi_ut_create_string_object((acpi_size) string_length);
+ acpi_ut_create_string_object((acpi_size)string_length);
if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -518,7 +520,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
}
return_desc =
- acpi_ut_create_string_object((acpi_size) string_length);
+ acpi_ut_create_string_object((acpi_size)string_length);
if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
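
acpi_ut_strtoul64() now takes the integer width as an extra argument
(acpi_gbl_integer_byte_width in the hunk above). A small sketch of the
effect this argument is presumably meant to have, assuming it clips the
converted value to the current AML integer width (4 bytes when the DSDT
revision is below 2, otherwise 8); the helper name is made up:

#include <stdint.h>
#include <stdio.h>

static uint64_t clip_to_integer_width(uint64_t value, unsigned int byte_width)
{
	if (byte_width == 4) {
		return value & 0xFFFFFFFFu;	/* 32-bit AML integers */
	}
	return value;				/* 64-bit AML integers */
}

int main(void)
{
	printf("0x%llX\n",
	       (unsigned long long)clip_to_integer_width(0x123456789AULL, 4));
	return 0;
}
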
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index bea9612e4720..613ba6eb08bb 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -394,7 +394,7 @@ acpi_status acpi_ex_create_processor(struct acpi_walk_state *walk_state)
obj_desc->processor.proc_id = (u8) operand[1]->integer.value;
obj_desc->processor.length = (u8) operand[3]->integer.value;
obj_desc->processor.address =
- (acpi_io_address) operand[2]->integer.value;
+ (acpi_io_address)operand[2]->integer.value;
/* Install the processor object in the parent Node */
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index ee30974b245a..fce6b2e10209 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -55,9 +55,9 @@ ACPI_MODULE_NAME("exdump")
*/
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
/* Local prototypes */
-static void acpi_ex_out_string(char *title, char *value);
+static void acpi_ex_out_string(const char *title, const char *value);
-static void acpi_ex_out_pointer(char *title, void *value);
+static void acpi_ex_out_pointer(const char *title, const void *value);
static void
acpi_ex_dump_object(union acpi_operand_object *obj_desc,
@@ -365,8 +365,7 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
struct acpi_exdump_info *info)
{
u8 *target;
- char *name;
- const char *reference_name;
+ const char *name;
u8 count;
union acpi_operand_object *start;
union acpi_operand_object *data = NULL;
@@ -459,9 +458,9 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
case ACPI_EXD_REFERENCE:
- reference_name = acpi_ut_get_reference_name(obj_desc);
acpi_ex_out_string("Class Name",
- ACPI_CAST_PTR(char, reference_name));
+ acpi_ut_get_reference_name
+ (obj_desc));
acpi_ex_dump_reference_obj(obj_desc);
break;
@@ -934,12 +933,12 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
*
******************************************************************************/
-static void acpi_ex_out_string(char *title, char *value)
+static void acpi_ex_out_string(const char *title, const char *value)
{
acpi_os_printf("%20s : %s\n", title, value);
}
-static void acpi_ex_out_pointer(char *title, void *value)
+static void acpi_ex_out_pointer(const char *title, const void *value)
{
acpi_os_printf("%20s : %p\n", title, value);
}
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index d5d8020a8523..d7d3ee36338b 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -126,7 +126,7 @@ acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length)
******************************************************************************/
acpi_status
-acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
+acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc,
union acpi_operand_object **ret_buffer_desc)
{
@@ -233,7 +233,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
* Note: Field.length is in bits.
*/
length =
- (acpi_size) ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
+ (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
if (length > acpi_gbl_integer_byte_width) {
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index f0c5ed0b7db8..ee76d299b3d0 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -164,7 +164,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
if (ACPI_ROUND_UP(rgn_desc->region.length,
obj_desc->common_field.
access_byte_width) >=
- ((acpi_size) obj_desc->common_field.
+ ((acpi_size)obj_desc->common_field.
base_byte_offset +
obj_desc->common_field.access_byte_width +
field_datum_byte_offset)) {
@@ -897,17 +897,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
access_bit_width = ACPI_MUL_8(obj_desc->common_field.access_byte_width);
- /*
- * Create the bitmasks used for bit insertion.
- * Note: This if/else is used to bypass compiler differences with the
- * shift operator
- */
- if (access_bit_width == ACPI_INTEGER_BIT_SIZE) {
- width_mask = ACPI_UINT64_MAX;
- } else {
- width_mask = ACPI_MASK_BITS_ABOVE(access_bit_width);
- }
+ /* Create the bitmasks used for bit insertion */
+ width_mask = ACPI_MASK_BITS_ABOVE_64(access_bit_width);
mask = width_mask &
ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset);
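
The exfldio.c hunk above folds the 64-bit special case into
ACPI_MASK_BITS_ABOVE_64(), removing the open-coded branch that avoided an
undefined full-width shift. A standalone sketch of one way such a helper
can be written (an assumption for illustration, not the ACPICA macro body):

#include <stdint.h>
#include <stdio.h>

static uint64_t mask_bits_above(unsigned int width)
{
	/* A 64-bit shift by 64 is undefined in C, so treat it explicitly */

	if (width >= 64) {
		return UINT64_MAX;
	}
	return (UINT64_C(1) << width) - 1;	/* keep the low 'width' bits */
}

int main(void)
{
	printf("%016llX %016llX\n",
	       (unsigned long long)mask_bits_above(8),
	       (unsigned long long)mask_bits_above(64));
	return 0;
}
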
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index db30ae43ddd8..4f7e667624b3 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -45,7 +45,6 @@
#include "accommon.h"
#include "acinterp.h"
#include "amlcode.h"
-#include "amlresrc.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exmisc")
@@ -140,295 +139,6 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
/*******************************************************************************
*
- * FUNCTION: acpi_ex_concat_template
- *
- * PARAMETERS: operand0 - First source object
- * operand1 - Second source object
- * actual_return_desc - Where to place the return object
- * walk_state - Current walk state
- *
- * RETURN: Status
- *
- * DESCRIPTION: Concatenate two resource templates
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_concat_template(union acpi_operand_object *operand0,
- union acpi_operand_object *operand1,
- union acpi_operand_object **actual_return_desc,
- struct acpi_walk_state *walk_state)
-{
- acpi_status status;
- union acpi_operand_object *return_desc;
- u8 *new_buf;
- u8 *end_tag;
- acpi_size length0;
- acpi_size length1;
- acpi_size new_length;
-
- ACPI_FUNCTION_TRACE(ex_concat_template);
-
- /*
- * Find the end_tag descriptor in each resource template.
- * Note1: returned pointers point TO the end_tag, not past it.
- * Note2: zero-length buffers are allowed; treated like one end_tag
- */
-
- /* Get the length of the first resource template */
-
- status = acpi_ut_get_resource_end_tag(operand0, &end_tag);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- length0 = ACPI_PTR_DIFF(end_tag, operand0->buffer.pointer);
-
- /* Get the length of the second resource template */
-
- status = acpi_ut_get_resource_end_tag(operand1, &end_tag);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- length1 = ACPI_PTR_DIFF(end_tag, operand1->buffer.pointer);
-
- /* Combine both lengths, minimum size will be 2 for end_tag */
-
- new_length = length0 + length1 + sizeof(struct aml_resource_end_tag);
-
- /* Create a new buffer object for the result (with one end_tag) */
-
- return_desc = acpi_ut_create_buffer_object(new_length);
- if (!return_desc) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- /*
- * Copy the templates to the new buffer, 0 first, then 1 follows. One
- * end_tag descriptor is copied from Operand1.
- */
- new_buf = return_desc->buffer.pointer;
- memcpy(new_buf, operand0->buffer.pointer, length0);
- memcpy(new_buf + length0, operand1->buffer.pointer, length1);
-
- /* Insert end_tag and set the checksum to zero, means "ignore checksum" */
-
- new_buf[new_length - 1] = 0;
- new_buf[new_length - 2] = ACPI_RESOURCE_NAME_END_TAG | 1;
-
- /* Return the completed resource template */
-
- *actual_return_desc = return_desc;
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ex_do_concatenate
- *
- * PARAMETERS: operand0 - First source object
- * operand1 - Second source object
- * actual_return_desc - Where to place the return object
- * walk_state - Current walk state
- *
- * RETURN: Status
- *
- * DESCRIPTION: Concatenate two objects OF THE SAME TYPE.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_do_concatenate(union acpi_operand_object *operand0,
- union acpi_operand_object *operand1,
- union acpi_operand_object **actual_return_desc,
- struct acpi_walk_state *walk_state)
-{
- union acpi_operand_object *local_operand1 = operand1;
- union acpi_operand_object *return_desc;
- char *new_buf;
- const char *type_string;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ex_do_concatenate);
-
- /*
- * Convert the second operand if necessary. The first operand
- * determines the type of the second operand, (See the Data Types
- * section of the ACPI specification.) Both object types are
- * guaranteed to be either Integer/String/Buffer by the operand
- * resolution mechanism.
- */
- switch (operand0->common.type) {
- case ACPI_TYPE_INTEGER:
-
- status =
- acpi_ex_convert_to_integer(operand1, &local_operand1, 16);
- break;
-
- case ACPI_TYPE_STRING:
- /*
- * Per the ACPI spec, Concatenate only supports int/str/buf.
- * However, we support all objects here as an extension.
- * This improves the usefulness of the Printf() macro.
- * 12/2015.
- */
- switch (operand1->common.type) {
- case ACPI_TYPE_INTEGER:
- case ACPI_TYPE_STRING:
- case ACPI_TYPE_BUFFER:
-
- status =
- acpi_ex_convert_to_string(operand1, &local_operand1,
- ACPI_IMPLICIT_CONVERT_HEX);
- break;
-
- default:
- /*
- * Just emit a string containing the object type.
- */
- type_string =
- acpi_ut_get_type_name(operand1->common.type);
-
- local_operand1 = acpi_ut_create_string_object(((acpi_size) strlen(type_string) + 9)); /* 9 For "[Object]" */
- if (!local_operand1) {
- status = AE_NO_MEMORY;
- goto cleanup;
- }
-
- strcpy(local_operand1->string.pointer, "[");
- strcat(local_operand1->string.pointer, type_string);
- strcat(local_operand1->string.pointer, " Object]");
- status = AE_OK;
- break;
- }
- break;
-
- case ACPI_TYPE_BUFFER:
-
- status = acpi_ex_convert_to_buffer(operand1, &local_operand1);
- break;
-
- default:
-
- ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
- operand0->common.type));
- status = AE_AML_INTERNAL;
- }
-
- if (ACPI_FAILURE(status)) {
- goto cleanup;
- }
-
- /*
- * Both operands are now known to be the same object type
- * (Both are Integer, String, or Buffer), and we can now perform the
- * concatenation.
- */
-
- /*
- * There are three cases to handle:
- *
- * 1) Two Integers concatenated to produce a new Buffer
- * 2) Two Strings concatenated to produce a new String
- * 3) Two Buffers concatenated to produce a new Buffer
- */
- switch (operand0->common.type) {
- case ACPI_TYPE_INTEGER:
-
- /* Result of two Integers is a Buffer */
- /* Need enough buffer space for two integers */
-
- return_desc = acpi_ut_create_buffer_object((acpi_size)
- ACPI_MUL_2
- (acpi_gbl_integer_byte_width));
- if (!return_desc) {
- status = AE_NO_MEMORY;
- goto cleanup;
- }
-
- new_buf = (char *)return_desc->buffer.pointer;
-
- /* Copy the first integer, LSB first */
-
- memcpy(new_buf, &operand0->integer.value,
- acpi_gbl_integer_byte_width);
-
- /* Copy the second integer (LSB first) after the first */
-
- memcpy(new_buf + acpi_gbl_integer_byte_width,
- &local_operand1->integer.value,
- acpi_gbl_integer_byte_width);
- break;
-
- case ACPI_TYPE_STRING:
-
- /* Result of two Strings is a String */
-
- return_desc = acpi_ut_create_string_object(((acpi_size)
- operand0->string.
- length +
- local_operand1->
- string.length));
- if (!return_desc) {
- status = AE_NO_MEMORY;
- goto cleanup;
- }
-
- new_buf = return_desc->string.pointer;
-
- /* Concatenate the strings */
-
- strcpy(new_buf, operand0->string.pointer);
- strcat(new_buf, local_operand1->string.pointer);
- break;
-
- case ACPI_TYPE_BUFFER:
-
- /* Result of two Buffers is a Buffer */
-
- return_desc = acpi_ut_create_buffer_object(((acpi_size)
- operand0->buffer.
- length +
- local_operand1->
- buffer.length));
- if (!return_desc) {
- status = AE_NO_MEMORY;
- goto cleanup;
- }
-
- new_buf = (char *)return_desc->buffer.pointer;
-
- /* Concatenate the buffers */
-
- memcpy(new_buf, operand0->buffer.pointer,
- operand0->buffer.length);
- memcpy(new_buf + operand0->buffer.length,
- local_operand1->buffer.pointer,
- local_operand1->buffer.length);
- break;
-
- default:
-
- /* Invalid object type, should not happen here */
-
- ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
- operand0->common.type));
- status = AE_AML_INTERNAL;
- goto cleanup;
- }
-
- *actual_return_desc = return_desc;
-
-cleanup:
- if (local_operand1 != operand1) {
- acpi_ut_remove_reference(local_operand1);
- }
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ex_do_math_op
*
* PARAMETERS: opcode - AML opcode
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 27c11ab5eb04..3d6af93fe561 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -178,7 +178,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
for (index = 0;
(index < ACPI_NAME_SIZE)
- && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) {
+ && (acpi_ut_valid_name_char(*aml_address, 0)); index++) {
char_buf[index] = *aml_address++;
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index]));
}
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 5aa21c4eda1d..69e4e269ad2f 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -184,7 +184,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
/* Get the Integer values from the objects */
index = operand[1]->integer.value;
- length = (acpi_size) operand[2]->integer.value;
+ length = (acpi_size)operand[2]->integer.value;
/*
* If the index is beyond the length of the String/Buffer, or if the
@@ -198,8 +198,8 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
else if ((index + length) > operand[0]->string.length) {
length =
- (acpi_size) operand[0]->string.length -
- (acpi_size) index;
+ (acpi_size)operand[0]->string.length -
+ (acpi_size)index;
}
/* Strings always have a sub-pointer, not so for buffers */
@@ -209,7 +209,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
/* Always allocate a new buffer for the String */
- buffer = ACPI_ALLOCATE_ZEROED((acpi_size) length + 1);
+ buffer = ACPI_ALLOCATE_ZEROED((acpi_size)length + 1);
if (!buffer) {
status = AE_NO_MEMORY;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index e2b63483857f..786d53b0bb37 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -207,7 +207,7 @@ acpi_ex_do_match(u32 match_op,
*
******************************************************************************/
-acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
+acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state *walk_state)
{
union acpi_operand_object **operand = &walk_state->operands[0];
union acpi_operand_object *return_desc = NULL;
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 076074daf2b6..31b381cae94d 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -325,15 +325,15 @@ acpi_ex_system_io_space_handler(u32 function,
switch (function) {
case ACPI_READ:
- status = acpi_hw_read_port((acpi_io_address) address,
+ status = acpi_hw_read_port((acpi_io_address)address,
&value32, bit_width);
*value = value32;
break;
case ACPI_WRITE:
- status = acpi_hw_write_port((acpi_io_address) address,
- (u32) * value, bit_width);
+ status = acpi_hw_write_port((acpi_io_address)address,
+ (u32)*value, bit_width);
break;
default:
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index c1e8bfb0f7f4..a183cb740d24 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -93,7 +93,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
*/
node = *object_ptr;
source_desc = acpi_ns_get_attached_object(node);
- entry_type = acpi_ns_get_type((acpi_handle) node);
+ entry_type = acpi_ns_get_type((acpi_handle)node);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Entry=%p SourceDesc=%p [%s]\n",
node, source_desc,
@@ -106,7 +106,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
node = ACPI_CAST_PTR(struct acpi_namespace_node, node->object);
source_desc = acpi_ns_get_attached_object(node);
- entry_type = acpi_ns_get_type((acpi_handle) node);
+ entry_type = acpi_ns_get_type((acpi_handle)node);
*object_ptr = node;
}
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index fedacf13dc36..e1d3878be2c6 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -334,7 +334,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
acpi_status
acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
union acpi_operand_object *operand,
- acpi_object_type * return_type,
+ acpi_object_type *return_type,
union acpi_operand_object **return_desc)
{
union acpi_operand_object *obj_desc = ACPI_CAST_PTR(void, operand);
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index cc2c26c46a6d..27b41fd7542d 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -131,8 +131,8 @@ acpi_ex_check_object_type(acpi_object_type type_needed,
acpi_status
acpi_ex_resolve_operands(u16 opcode,
- union acpi_operand_object ** stack_ptr,
- struct acpi_walk_state * walk_state)
+ union acpi_operand_object **stack_ptr,
+ struct acpi_walk_state *walk_state)
{
union acpi_operand_object *obj_desc;
acpi_status status = AE_OK;
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 28b724827f0f..1dab82746d06 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -188,7 +188,7 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
* Clear old string and copy in the new one
*/
memset(target_desc->string.pointer, 0,
- (acpi_size) target_desc->string.length + 1);
+ (acpi_size)target_desc->string.length + 1);
memcpy(target_desc->string.pointer, buffer, length);
} else {
/*
@@ -204,7 +204,7 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
}
target_desc->string.pointer =
- ACPI_ALLOCATE_ZEROED((acpi_size) length + 1);
+ ACPI_ALLOCATE_ZEROED((acpi_size)length + 1);
if (!target_desc->string.pointer) {
return_ACPI_STATUS(AE_NO_MEMORY);
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 4d44bc1cb2ca..425f13372e68 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -301,8 +301,8 @@ static u32 acpi_ex_digits_needed(u64 value, u32 base)
*
* FUNCTION: acpi_ex_eisa_id_to_string
*
- * PARAMETERS: compressed_id - EISAID to be converted
- * out_string - Where to put the converted string (8 bytes)
+ * PARAMETERS: out_string - Where to put the converted string (8 bytes)
+ * compressed_id - EISAID to be converted
*
* RETURN: None
*
@@ -354,7 +354,7 @@ void acpi_ex_eisa_id_to_string(char *out_string, u64 compressed_id)
* possible 64-bit integer.
* value - Value to be converted
*
- * RETURN: None, string
+ * RETURN: Converted string in out_string
*
* DESCRIPTION: Convert a 64-bit integer to decimal string representation.
* Assumes string buffer is large enough to hold the string. The
@@ -384,9 +384,9 @@ void acpi_ex_integer_to_string(char *out_string, u64 value)
* FUNCTION: acpi_ex_pci_cls_to_string
*
* PARAMETERS: out_string - Where to put the converted string (7 bytes)
- * PARAMETERS: class_code - PCI class code to be converted (3 bytes)
+ * class_code - PCI class code to be converted (3 bytes)
*
- * RETURN: None
+ * RETURN: Converted string in out_string
*
* DESCRIPTION: Convert 3-bytes PCI class code to string representation.
* Return buffer must be large enough to hold the string. The
@@ -417,7 +417,7 @@ void acpi_ex_pci_cls_to_string(char *out_string, u8 class_code[3])
*
* PARAMETERS: space_id - ID to be validated
*
- * RETURN: TRUE if valid/supported ID.
+ * RETURN: TRUE if space_id is a valid/supported ID.
*
* DESCRIPTION: Validate an operation region space_ID.
*
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 1c4f4518611a..bdecd5e76e87 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -166,7 +166,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
*
******************************************************************************/
-acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
+acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
struct acpi_gpe_register_info *gpe_register_info;
acpi_status status;
@@ -206,7 +206,7 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
******************************************************************************/
acpi_status
-acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
+acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
acpi_event_status *event_status)
{
u32 in_byte;
@@ -391,7 +391,7 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_status
acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info * gpe_block,
+ struct acpi_gpe_block_info *gpe_block,
void *context)
{
u32 i;
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 5ba0498412fd..0f18dbc9a37f 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -51,6 +51,10 @@ ACPI_MODULE_NAME("hwregs")
#if (!ACPI_REDUCED_HARDWARE)
/* Local Prototypes */
+static u8
+acpi_hw_get_access_bit_width(struct acpi_generic_address *reg,
+ u8 max_bit_width);
+
static acpi_status
acpi_hw_read_multiple(u32 *value,
struct acpi_generic_address *register_a,
@@ -65,6 +69,48 @@ acpi_hw_write_multiple(u32 value,
/******************************************************************************
*
+ * FUNCTION: acpi_hw_get_access_bit_width
+ *
+ * PARAMETERS: reg - GAS register structure
+ * max_bit_width - Max bit_width supported (32 or 64)
+ *
+ * RETURN: Access bit width (in bits)
+ *
+ * DESCRIPTION: Obtain optimal access bit width
+ *
+ ******************************************************************************/
+
+static u8
+acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width)
+{
+ u64 address;
+
+ if (!reg->access_width) {
+ /*
+ * Detect old register descriptors where only the bit_width field
+ * makes sense. The target address is copied to handle possible
+ * alignment issues.
+ */
+ ACPI_MOVE_64_TO_64(&address, &reg->address);
+ if (!reg->bit_offset && reg->bit_width &&
+ ACPI_IS_POWER_OF_TWO(reg->bit_width) &&
+ ACPI_IS_ALIGNED(reg->bit_width, 8) &&
+ ACPI_IS_ALIGNED(address, reg->bit_width)) {
+ return (reg->bit_width);
+ } else {
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ return (32);
+ } else {
+ return (max_bit_width);
+ }
+ }
+ } else {
+ return (1 << (reg->access_width + 2));
+ }
+}
+
+/******************************************************************************
+ *
* FUNCTION: acpi_hw_validate_register
*
* PARAMETERS: reg - GAS register structure
@@ -83,6 +129,8 @@ acpi_status
acpi_hw_validate_register(struct acpi_generic_address *reg,
u8 max_bit_width, u64 *address)
{
+ u8 bit_width;
+ u8 access_width;
/* Must have a valid pointer to a GAS structure */
@@ -109,23 +157,25 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
return (AE_SUPPORT);
}
- /* Validate the bit_width */
+ /* Validate the access_width */
- if ((reg->bit_width != 8) &&
- (reg->bit_width != 16) &&
- (reg->bit_width != 32) && (reg->bit_width != max_bit_width)) {
+ if (reg->access_width > 4) {
ACPI_ERROR((AE_INFO,
- "Unsupported register bit width: 0x%X",
- reg->bit_width));
+ "Unsupported register access width: 0x%X",
+ reg->access_width));
return (AE_SUPPORT);
}
- /* Validate the bit_offset. Just a warning for now. */
+ /* Validate the bit_width, convert access_width into number of bits */
- if (reg->bit_offset != 0) {
+ access_width = acpi_hw_get_access_bit_width(reg, max_bit_width);
+ bit_width =
+ ACPI_ROUND_UP(reg->bit_offset + reg->bit_width, access_width);
+ if (max_bit_width < bit_width) {
ACPI_WARNING((AE_INFO,
- "Unsupported register bit offset: 0x%X",
- reg->bit_offset));
+ "Requested bit width 0x%X is smaller than register bit width 0x%X",
+ max_bit_width, bit_width));
+ return (AE_SUPPORT);
}
return (AE_OK);
@@ -145,17 +195,19 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
* 64-bit values is not needed.
*
* LIMITATIONS: <These limitations also apply to acpi_hw_write>
- * bit_width must be exactly 8, 16, or 32.
* space_ID must be system_memory or system_IO.
- * bit_offset and access_width are currently ignored, as there has
- * not been a need to implement these.
*
******************************************************************************/
acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
{
u64 address;
+ u8 access_width;
+ u32 bit_width;
+ u8 bit_offset;
u64 value64;
+ u32 value32;
+ u8 index;
acpi_status status;
ACPI_FUNCTION_NAME(hw_read);
@@ -167,28 +219,75 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
return (status);
}
- /* Initialize entire 32-bit return value to zero */
-
+ /*
+ * Initialize the entire 32-bit return value to zero and convert the
+ * access_width into a number of bits
+ */
*value = 0;
+ access_width = acpi_hw_get_access_bit_width(reg, 32);
+ bit_width = reg->bit_offset + reg->bit_width;
+ bit_offset = reg->bit_offset;
/*
* Two address spaces supported: Memory or IO. PCI_Config is
* not supported here because the GAS structure is insufficient
*/
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- status = acpi_os_read_memory((acpi_physical_address)
- address, &value64, reg->bit_width);
+ index = 0;
+ while (bit_width) {
+ if (bit_offset >= access_width) {
+ value32 = 0;
+ bit_offset -= access_width;
+ } else {
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ status =
+ acpi_os_read_memory((acpi_physical_address)
+ address +
+ index *
+ ACPI_DIV_8
+ (access_width),
+ &value64, access_width);
+ value32 = (u32)value64;
+ } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+
+ status = acpi_hw_read_port((acpi_io_address)
+ address +
+ index *
+ ACPI_DIV_8
+ (access_width),
+ &value32,
+ access_width);
+ }
+
+ /*
+ * Use offset style bit masks because:
+ * bit_offset < access_width/bit_width < access_width, and
+ * access_width is ensured to be less than 32-bits by
+ * acpi_hw_validate_register().
+ */
+ if (bit_offset) {
+ value32 &= ACPI_MASK_BITS_BELOW(bit_offset);
+ bit_offset = 0;
+ }
+ if (bit_width < access_width) {
+ value32 &= ACPI_MASK_BITS_ABOVE(bit_width);
+ }
+ }
- *value = (u32)value64;
- } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+ /*
+ * Use offset style bit writes because "Index * AccessWidth" is
+ * ensured to be less than 32-bits by acpi_hw_validate_register().
+ */
+ ACPI_SET_BITS(value, index * access_width,
+ ACPI_MASK_BITS_ABOVE_32(access_width), value32);
- status = acpi_hw_read_port((acpi_io_address)
- address, value, reg->bit_width);
+ bit_width -=
+ bit_width > access_width ? access_width : bit_width;
+ index++;
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
"Read: %8.8X width %2d from %8.8X%8.8X (%s)\n",
- *value, reg->bit_width, ACPI_FORMAT_UINT64(address),
+ *value, access_width, ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
return (status);
@@ -212,6 +311,12 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
{
u64 address;
+ u8 access_width;
+ u32 bit_width;
+ u8 bit_offset;
+ u64 value64;
+ u32 new_value32, old_value32;
+ u8 index;
acpi_status status;
ACPI_FUNCTION_NAME(hw_write);
@@ -223,23 +328,145 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
return (status);
}
+ /* Convert the access_width into a number of bits */
+
+ access_width = acpi_hw_get_access_bit_width(reg, 32);
+ bit_width = reg->bit_offset + reg->bit_width;
+ bit_offset = reg->bit_offset;
+
/*
* Two address spaces supported: Memory or IO. PCI_Config is
* not supported here because the GAS structure is insufficient
*/
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- status = acpi_os_write_memory((acpi_physical_address)
- address, (u64)value,
- reg->bit_width);
- } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
-
- status = acpi_hw_write_port((acpi_io_address)
- address, value, reg->bit_width);
+ index = 0;
+ while (bit_width) {
+ /*
+ * Use offset style bit reads because "Index * AccessWidth" is
+ * ensured to be less than 32-bits by acpi_hw_validate_register().
+ */
+ new_value32 = ACPI_GET_BITS(&value, index * access_width,
+ ACPI_MASK_BITS_ABOVE_32
+ (access_width));
+
+ if (bit_offset >= access_width) {
+ bit_offset -= access_width;
+ } else {
+ /*
+ * Use offset style bit masks because access_width is ensured
+ * to be less than 32-bits by acpi_hw_validate_register() and
+ * bit_offset/bit_width is less than access_width here.
+ */
+ if (bit_offset) {
+ new_value32 &= ACPI_MASK_BITS_BELOW(bit_offset);
+ }
+ if (bit_width < access_width) {
+ new_value32 &= ACPI_MASK_BITS_ABOVE(bit_width);
+ }
+
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ if (bit_offset || bit_width < access_width) {
+ /*
+ * Read old values in order not to modify the bits that
+ * are beyond the register bit_width/bit_offset setting.
+ */
+ status =
+ acpi_os_read_memory((acpi_physical_address)
+ address +
+ index *
+ ACPI_DIV_8
+ (access_width),
+ &value64,
+ access_width);
+ old_value32 = (u32)value64;
+
+ /*
+ * Use offset style bit masks because access_width is
+ * ensured to be less than 32-bits by
+ * acpi_hw_validate_register() and bit_offset/bit_width is
+ * less than access_width here.
+ */
+ if (bit_offset) {
+ old_value32 &=
+ ACPI_MASK_BITS_ABOVE
+ (bit_offset);
+ bit_offset = 0;
+ }
+ if (bit_width < access_width) {
+ old_value32 &=
+ ACPI_MASK_BITS_BELOW
+ (bit_width);
+ }
+
+ new_value32 |= old_value32;
+ }
+
+ value64 = (u64)new_value32;
+ status =
+ acpi_os_write_memory((acpi_physical_address)
+ address +
+ index *
+ ACPI_DIV_8
+ (access_width),
+ value64, access_width);
+ } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+
+ if (bit_offset || bit_width < access_width) {
+ /*
+ * Read old values in order not to modify the bits that
+ * are beyond the register bit_width/bit_offset setting.
+ */
+ status =
+ acpi_hw_read_port((acpi_io_address)
+ address +
+ index *
+ ACPI_DIV_8
+ (access_width),
+ &old_value32,
+ access_width);
+
+ /*
+ * Use offset style bit masks because access_width is
+ * ensured to be less than 32-bits by
+ * acpi_hw_validate_register() and bit_offset/bit_width is
+ * less than access_width here.
+ */
+ if (bit_offset) {
+ old_value32 &=
+ ACPI_MASK_BITS_ABOVE
+ (bit_offset);
+ bit_offset = 0;
+ }
+ if (bit_width < access_width) {
+ old_value32 &=
+ ACPI_MASK_BITS_BELOW
+ (bit_width);
+ }
+
+ new_value32 |= old_value32;
+ }
+
+ status = acpi_hw_write_port((acpi_io_address)
+ address +
+ index *
+ ACPI_DIV_8
+ (access_width),
+ new_value32,
+ access_width);
+ }
+ }
+
+ /*
+ * Index * access_width is ensured to be less than 32-bits by
+ * acpi_hw_validate_register().
+ */
+ bit_width -=
+ bit_width > access_width ? access_width : bit_width;
+ index++;
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
"Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
- value, reg->bit_width, ACPI_FORMAT_UINT64(address),
+ value, access_width, ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
return (status);
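
acpi_hw_read() and acpi_hw_write() above now derive the access size from
the GAS access_width field (1 << (access_width + 2) bits) and walk the
register in access-width chunks, masking off bits outside the
bit_offset/bit_width window. A simplified, standalone model of the read
loop over a plain byte buffer; names are illustrative, a little-endian
host is assumed, and only windows that fit in 32 bits are handled (as in
the 32-bit path above):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t read_chunk(const uint8_t *mem, unsigned int index,
			   unsigned int access_bits)
{
	uint32_t chunk = 0;

	memcpy(&chunk, mem + index * (access_bits / 8), access_bits / 8);
	return chunk;
}

static uint32_t gas_read(const uint8_t *mem, uint8_t access_width,
			 uint8_t bit_offset, uint8_t reg_bit_width)
{
	unsigned int access_bits = 1u << (access_width + 2);	/* 1..3 -> 8..32 */
	unsigned int bit_width = bit_offset + reg_bit_width;
	unsigned int index = 0;
	uint32_t value = 0;

	while (bit_width) {
		uint32_t chunk = 0;

		if (bit_offset >= access_bits) {
			bit_offset -= access_bits;	/* chunk is below the window */
		} else {
			chunk = read_chunk(mem, index, access_bits);
			if (bit_offset) {
				chunk &= ~0u << bit_offset;	/* clear bits below */
				bit_offset = 0;
			}
			if (bit_width < access_bits) {
				chunk &= (1u << bit_width) - 1;	/* clear bits above */
			}
		}

		/* Field bits keep their register-relative positions */

		value |= chunk << (index * access_bits);
		bit_width -= (bit_width > access_bits) ? access_bits : bit_width;
		index++;
	}
	return value;
}

int main(void)
{
	uint8_t mem[4] = { 0x12, 0x34, 0x56, 0x78 };

	/* 16-bit field at bit offset 8, read with byte (access_width 1) accesses */
	printf("0x%08X\n", gas_read(mem, 1, 8, 16));
	return 0;
}
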
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index a01ddb393a55..98c26ff39409 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -91,10 +91,9 @@ acpi_status acpi_reset(void)
* compatibility with other ACPI implementations that have allowed
* BIOS code with bad register width values to go unnoticed.
*/
- status =
- acpi_os_write_port((acpi_io_address) reset_reg->address,
- acpi_gbl_FADT.reset_value,
- ACPI_RESET_REGISTER_WIDTH);
+ status = acpi_os_write_port((acpi_io_address)reset_reg->address,
+ acpi_gbl_FADT.reset_value,
+ ACPI_RESET_REGISTER_WIDTH);
} else {
/* Write the reset value to the reset register */
@@ -504,9 +503,7 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
* Evaluate the \_Sx namespace object containing the register values
* for this state
*/
- info->relative_pathname = ACPI_CAST_PTR(char,
- acpi_gbl_sleep_state_names
- [sleep_state]);
+ info->relative_pathname = acpi_gbl_sleep_state_names[sleep_state];
status = acpi_ns_evaluate(info);
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 697af810e5ad..426a6307eafa 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -107,9 +107,10 @@ acpi_status acpi_ns_root_initialize(void)
continue;
}
- status = acpi_ns_lookup(NULL, init_val->name, init_val->type,
- ACPI_IMODE_LOAD_PASS2,
- ACPI_NS_NO_UPSEARCH, NULL, &new_node);
+ status =
+ acpi_ns_lookup(NULL, (char *)init_val->name, init_val->type,
+ ACPI_IMODE_LOAD_PASS2, ACPI_NS_NO_UPSEARCH,
+ NULL, &new_node);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not create predefined name %s",
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 878e8fb6a64c..c803bda7915c 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -79,7 +79,8 @@ acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
/* String-to-Integer conversion */
status = acpi_ut_strtoul64(original_object->string.pointer,
- ACPI_ANY_BASE, &value);
+ ACPI_ANY_BASE,
+ acpi_gbl_integer_byte_width, &value);
if (ACPI_FAILURE(status)) {
return (status);
}
@@ -317,7 +318,7 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
******************************************************************************/
acpi_status
-acpi_ns_convert_to_unicode(struct acpi_namespace_node * scope,
+acpi_ns_convert_to_unicode(struct acpi_namespace_node *scope,
union acpi_operand_object *original_object,
union acpi_operand_object **return_object)
{
@@ -384,7 +385,7 @@ acpi_ns_convert_to_unicode(struct acpi_namespace_node * scope,
******************************************************************************/
acpi_status
-acpi_ns_convert_to_resource(struct acpi_namespace_node * scope,
+acpi_ns_convert_to_resource(struct acpi_namespace_node *scope,
union acpi_operand_object *original_object,
union acpi_operand_object **return_object)
{
@@ -463,7 +464,7 @@ acpi_ns_convert_to_resource(struct acpi_namespace_node * scope,
******************************************************************************/
acpi_status
-acpi_ns_convert_to_reference(struct acpi_namespace_node * scope,
+acpi_ns_convert_to_reference(struct acpi_namespace_node *scope,
union acpi_operand_object *original_object,
union acpi_operand_object **return_object)
{
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index af236e348294..ce1f8605d996 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -81,7 +81,7 @@ acpi_ns_get_max_depth(acpi_handle obj_handle,
*
******************************************************************************/
-void acpi_ns_print_pathname(u32 num_segments, char *pathname)
+void acpi_ns_print_pathname(u32 num_segments, const char *pathname)
{
u32 i;
@@ -114,6 +114,9 @@ void acpi_ns_print_pathname(u32 num_segments, char *pathname)
acpi_os_printf("]\n");
}
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+/* Not used at this time, perhaps later */
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_dump_pathname
@@ -131,7 +134,8 @@ void acpi_ns_print_pathname(u32 num_segments, char *pathname)
******************************************************************************/
void
-acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
+acpi_ns_dump_pathname(acpi_handle handle,
+ const char *msg, u32 level, u32 component)
{
ACPI_FUNCTION_TRACE(ns_dump_pathname);
@@ -148,6 +152,7 @@ acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
acpi_os_printf("\n");
return_VOID;
}
+#endif
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index d4aa8b696ee9..36643a8cf65a 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -140,6 +140,7 @@ acpi_status acpi_ns_initialize_devices(u32 flags)
{
acpi_status status = AE_OK;
struct acpi_device_walk_info info;
+ acpi_handle handle;
ACPI_FUNCTION_TRACE(ns_initialize_devices);
@@ -190,6 +191,27 @@ acpi_status acpi_ns_initialize_devices(u32 flags)
if (ACPI_SUCCESS(status)) {
info.num_INI++;
}
+
+ /*
+ * Execute \_SB._INI.
+ * There appears to be a strict order requirement for \_SB._INI,
+ * which should be evaluated before any _REG evaluations.
+ */
+ status = acpi_get_handle(NULL, "\\_SB", &handle);
+ if (ACPI_SUCCESS(status)) {
+ memset(info.evaluate_info, 0,
+ sizeof(struct acpi_evaluate_info));
+ info.evaluate_info->prefix_node = handle;
+ info.evaluate_info->relative_pathname =
+ METHOD_NAME__INI;
+ info.evaluate_info->parameters = NULL;
+ info.evaluate_info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ status = acpi_ns_evaluate(info.evaluate_info);
+ if (ACPI_SUCCESS(status)) {
+ info.num_INI++;
+ }
+ }
}
/*
@@ -198,6 +220,12 @@ acpi_status acpi_ns_initialize_devices(u32 flags)
* Note: Any objects accessed by the _REG methods will be automatically
* initialized, even if they contain executable AML (see the call to
* acpi_ns_initialize_objects below).
+ *
+	 * Note: According to the ACPI specification, _REG need not actually be
+	 * executed for system_memory/system_io operation regions. However, for
+	 * PCI_Config operation regions, _REG must be evaluated for those on a PCI
+	 * root bus that does not contain a _BBN object. This code is therefore
+	 * kept in order not to break things.
*/
if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@@ -592,33 +620,37 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
* Note: We know there is an _INI within this subtree, but it may not be
* under this particular device, it may be lower in the branch.
*/
- ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
- (ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI));
-
- memset(info, 0, sizeof(struct acpi_evaluate_info));
- info->prefix_node = device_node;
- info->relative_pathname = METHOD_NAME__INI;
- info->parameters = NULL;
- info->flags = ACPI_IGNORE_RETURN_VALUE;
-
- status = acpi_ns_evaluate(info);
-
- if (ACPI_SUCCESS(status)) {
- walk_info->num_INI++;
- }
+ if (!ACPI_COMPARE_NAME(device_node->name.ascii, "_SB_") ||
+ device_node->parent != acpi_gbl_root_node) {
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_METHOD, device_node,
+ METHOD_NAME__INI));
+
+ memset(info, 0, sizeof(struct acpi_evaluate_info));
+ info->prefix_node = device_node;
+ info->relative_pathname = METHOD_NAME__INI;
+ info->parameters = NULL;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ status = acpi_ns_evaluate(info);
+ if (ACPI_SUCCESS(status)) {
+ walk_info->num_INI++;
+ }
#ifdef ACPI_DEBUG_OUTPUT
- else if (status != AE_NOT_FOUND) {
+ else if (status != AE_NOT_FOUND) {
- /* Ignore error and move on to next device */
+ /* Ignore error and move on to next device */
- char *scope_name =
- acpi_ns_get_normalized_pathname(device_node, TRUE);
+ char *scope_name =
+ acpi_ns_get_normalized_pathname(device_node, TRUE);
- ACPI_EXCEPTION((AE_INFO, status, "during %s._INI execution",
- scope_name));
- ACPI_FREE(scope_name);
- }
+ ACPI_EXCEPTION((AE_INFO, status,
+ "during %s._INI execution",
+ scope_name));
+ ACPI_FREE(scope_name);
+ }
#endif
+ }
/* Ignore errors from above */
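
For comparison, a host-side caller can trigger a no-argument control method such as \_SB._INI through the public interface and simply discard its result, which is what the internal ACPI_IGNORE_RETURN_VALUE path above achieves. The following is a sketch only, assuming a kernel context with <linux/acpi.h>; the handle lookup mirrors the acpi_get_handle(NULL, "\_SB", ...) call added in this hunk.

	/*
	 * Sketch: run a no-argument method like \_SB._INI via the public API
	 * and ignore the return value.
	 */
	static acpi_status run_sb_ini(void)
	{
		acpi_handle sb;
		acpi_status status;

		status = acpi_get_handle(NULL, "\\_SB", &sb);
		if (ACPI_FAILURE(status))
			return status;

		/* NULL argument list and NULL return buffer: result is discarded */
		return acpi_evaluate_object(sb, "_INI", NULL, NULL);
	}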
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 75cdb8790d49..b5e2b0ada0ab 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -123,8 +123,8 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
acpi_ns_delete_namespace_by_owner(acpi_gbl_root_table_list.
tables[table_index].owner_id);
- acpi_tb_release_owner_id(table_index);
+ acpi_tb_release_owner_id(table_index);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index eb6e1b88a51d..f03dd41e86d0 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -113,7 +113,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
acpi_status
acpi_ns_handle_to_pathname(acpi_handle target_handle,
- struct acpi_buffer * buffer, u8 no_trailing)
+ struct acpi_buffer *buffer, u8 no_trailing)
{
acpi_status status;
struct acpi_namespace_node *node;
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 051306f0d0d6..cfa2bb7162d8 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -399,7 +399,7 @@ acpi_ns_attach_data(struct acpi_namespace_node *node,
******************************************************************************/
acpi_status
-acpi_ns_detach_data(struct acpi_namespace_node * node,
+acpi_ns_detach_data(struct acpi_namespace_node *node,
acpi_object_handler handler)
{
union acpi_operand_object *obj_desc;
@@ -444,7 +444,7 @@ acpi_ns_detach_data(struct acpi_namespace_node * node,
******************************************************************************/
acpi_status
-acpi_ns_get_attached_data(struct acpi_namespace_node * node,
+acpi_ns_get_attached_data(struct acpi_namespace_node *node,
acpi_object_handler handler, void **data)
{
union acpi_operand_object *obj_desc;
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 9047f2808d5b..fbedc6e8ab36 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -62,6 +62,10 @@ acpi_ns_check_package_elements(struct acpi_evaluate_info *info,
u32 count1,
u8 type2, u32 count2, u32 start_index);
+static acpi_status
+acpi_ns_custom_package(struct acpi_evaluate_info *info,
+ union acpi_operand_object **elements, u32 count);
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_check_package
@@ -135,6 +139,11 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
* PTYPE2 packages contain subpackages
*/
switch (package->ret_info.type) {
+ case ACPI_PTYPE_CUSTOM:
+
+ status = acpi_ns_custom_package(info, elements, count);
+ break;
+
case ACPI_PTYPE1_FIXED:
/*
* The package count is fixed and there are no subpackages
@@ -179,6 +188,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
if (ACPI_FAILURE(status)) {
return (status);
}
+
elements++;
}
break;
@@ -225,6 +235,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
return (status);
}
}
+
elements++;
}
break;
@@ -569,11 +580,13 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
if (sub_package->package.count < expected_count) {
goto package_too_small;
}
+
if (sub_package->package.count <
package->ret_info.count1) {
expected_count = package->ret_info.count1;
goto package_too_small;
}
+
if (expected_count == 0) {
/*
* Either the num_entries element was originally zero or it was
@@ -622,6 +635,83 @@ package_too_small:
/*******************************************************************************
*
+ * FUNCTION: acpi_ns_custom_package
+ *
+ * PARAMETERS: info - Method execution information block
+ * elements - Pointer to the package elements array
+ * count - Element count for the package
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check a returned package object for the correct count and
+ * correct type of all sub-objects.
+ *
+ * NOTE: Currently used for the _BIX method only. When needed for two or more
+ * methods, a detect/dispatch mechanism will probably be required.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_custom_package(struct acpi_evaluate_info *info,
+ union acpi_operand_object **elements, u32 count)
+{
+ u32 expected_count;
+ u32 version;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_NAME(ns_custom_package);
+
+ /* Get version number, must be Integer */
+
+ if ((*elements)->common.type != ACPI_TYPE_INTEGER) {
+ ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
+ info->node_flags,
+ "Return Package has invalid object type for version number"));
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ version = (u32)(*elements)->integer.value;
+ expected_count = 21; /* Version 1 */
+
+ if (version == 0) {
+ expected_count = 20; /* Version 0 */
+ }
+
+ if (count < expected_count) {
+ ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
+ info->node_flags,
+ "Return Package is too small - found %u elements, expected %u",
+ count, expected_count));
+ return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
+ } else if (count > expected_count) {
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Return Package is larger than needed - "
+ "found %u, expected %u\n",
+ info->full_pathname, count, expected_count));
+ }
+
+ /* Validate all elements of the returned package */
+
+ status = acpi_ns_check_package_elements(info, elements,
+ ACPI_RTYPE_INTEGER, 16,
+ ACPI_RTYPE_STRING, 4, 0);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Version 1 has a single trailing integer */
+
+ if (version > 0) {
+ status = acpi_ns_check_package_elements(info, elements + 20,
+ ACPI_RTYPE_INTEGER, 1,
+ 0, 0, 20);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ns_check_package_elements
*
* PARAMETERS: info - Method execution information block
@@ -661,6 +751,7 @@ acpi_ns_check_package_elements(struct acpi_evaluate_info *info,
if (ACPI_FAILURE(status)) {
return (status);
}
+
this_element++;
}
@@ -671,6 +762,7 @@ acpi_ns_check_package_elements(struct acpi_evaluate_info *info,
if (ACPI_FAILURE(status)) {
return (status);
}
+
this_element++;
}
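
The custom _BIX check above relies on acpi_ns_check_package_elements() to verify a run of 16 integers followed by 4 strings, plus one trailing integer for version 1. The sketch below shows the same "count1 of type1, then count2 of type2" pattern with an invented helper, purely to illustrate the validation shape; it is not the ACPICA routine.

	#include <stdbool.h>
	#include <stddef.h>

	enum obj_type { OBJ_INTEGER, OBJ_STRING };

	/* Validate count1 elements of type1 followed by count2 of type2 */
	static bool check_elements(const enum obj_type *elems, size_t count,
				   enum obj_type type1, size_t count1,
				   enum obj_type type2, size_t count2)
	{
		size_t i;

		if (count < count1 + count2)
			return false;

		for (i = 0; i < count1; i++)
			if (elems[i] != type1)
				return false;

		for (; i < count1 + count2; i++)
			if (elems[i] != type2)
				return false;

		return true;
	}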
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 805e36de8707..9523d41c7ae9 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -399,7 +399,7 @@ static const struct acpi_simple_repair_info *acpi_ns_match_simple_repair(struct
******************************************************************************/
acpi_status
-acpi_ns_repair_null_element(struct acpi_evaluate_info * info,
+acpi_ns_repair_null_element(struct acpi_evaluate_info *info,
u32 expected_btypes,
u32 package_index,
union acpi_operand_object **return_object_ptr)
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 63edbbbf9ae4..d5336122486b 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -54,9 +54,9 @@ ACPI_MODULE_NAME("nsrepair2")
* be repaired on a per-name basis.
*/
typedef
-acpi_status(*acpi_repair_function) (struct acpi_evaluate_info * info,
- union acpi_operand_object
- **return_object_ptr);
+acpi_status (*acpi_repair_function) (struct acpi_evaluate_info * info,
+ union acpi_operand_object **
+ return_object_ptr);
typedef struct acpi_repair_info {
char name[ACPI_NAME_SIZE];
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index c72cc62b92d0..784a30b76e0f 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -272,11 +272,11 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
result = &internal_name[i];
} else if (num_segments == 2) {
internal_name[i] = AML_DUAL_NAME_PREFIX;
- result = &internal_name[(acpi_size) i + 1];
+ result = &internal_name[(acpi_size)i + 1];
} else {
internal_name[i] = AML_MULTI_NAME_PREFIX_OP;
- internal_name[(acpi_size) i + 1] = (char)num_segments;
- result = &internal_name[(acpi_size) i + 2];
+ internal_name[(acpi_size)i + 1] = (char)num_segments;
+ result = &internal_name[(acpi_size)i + 2];
}
}
@@ -456,7 +456,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
names_index = prefix_length + 2;
num_segments = (u8)
- internal_name[(acpi_size) prefix_length + 1];
+ internal_name[(acpi_size)prefix_length + 1];
break;
case AML_DUAL_NAME_PREFIX:
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index a7deeaa8eddc..d2a9b4fd739f 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -256,7 +256,7 @@ acpi_evaluate_object(acpi_handle handle,
* Allocate a new parameter block for the internal objects
* Add 1 to count to allow for null terminated internal list
*/
- info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size) info->
+ info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size)info->
param_count +
1) * sizeof(void *));
if (!info->parameters) {
@@ -280,13 +280,12 @@ acpi_evaluate_object(acpi_handle handle,
info->parameters[info->param_count] = NULL;
}
-#if 0
+#ifdef _FUTURE_FEATURE
/*
* Begin incoming argument count analysis. Check for too few args
* and too many args.
*/
-
switch (acpi_ns_get_type(info->node)) {
case ACPI_TYPE_METHOD:
@@ -370,68 +369,68 @@ acpi_evaluate_object(acpi_handle handle,
* If we are expecting a return value, and all went well above,
* copy the return value to an external object.
*/
- if (return_buffer) {
- if (!info->return_object) {
- return_buffer->length = 0;
- } else {
- if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
- ACPI_DESC_TYPE_NAMED) {
- /*
- * If we received a NS Node as a return object, this means that
- * the object we are evaluating has nothing interesting to
- * return (such as a mutex, etc.) We return an error because
- * these types are essentially unsupported by this interface.
- * We don't check up front because this makes it easier to add
- * support for various types at a later date if necessary.
- */
- status = AE_TYPE;
- info->return_object = NULL; /* No need to delete a NS Node */
- return_buffer->length = 0;
- }
+ if (!return_buffer) {
+ goto cleanup_return_object;
+ }
- if (ACPI_SUCCESS(status)) {
+ if (!info->return_object) {
+ return_buffer->length = 0;
+ goto cleanup;
+ }
- /* Dereference Index and ref_of references */
+ if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
+ ACPI_DESC_TYPE_NAMED) {
+ /*
+ * If we received a NS Node as a return object, this means that
+ * the object we are evaluating has nothing interesting to
+ * return (such as a mutex, etc.) We return an error because
+ * these types are essentially unsupported by this interface.
+ * We don't check up front because this makes it easier to add
+ * support for various types at a later date if necessary.
+ */
+ status = AE_TYPE;
+ info->return_object = NULL; /* No need to delete a NS Node */
+ return_buffer->length = 0;
+ }
- acpi_ns_resolve_references(info);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup_return_object;
+ }
- /* Get the size of the returned object */
+ /* Dereference Index and ref_of references */
- status =
- acpi_ut_get_object_size(info->return_object,
- &buffer_space_needed);
- if (ACPI_SUCCESS(status)) {
-
- /* Validate/Allocate/Clear caller buffer */
-
- status =
- acpi_ut_initialize_buffer
- (return_buffer,
- buffer_space_needed);
- if (ACPI_FAILURE(status)) {
- /*
- * Caller's buffer is too small or a new one can't
- * be allocated
- */
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Needed buffer size %X, %s\n",
- (u32)
- buffer_space_needed,
- acpi_format_exception
- (status)));
- } else {
- /* We have enough space for the object, build it */
-
- status =
- acpi_ut_copy_iobject_to_eobject
- (info->return_object,
- return_buffer);
- }
- }
- }
+ acpi_ns_resolve_references(info);
+
+ /* Get the size of the returned object */
+
+ status = acpi_ut_get_object_size(info->return_object,
+ &buffer_space_needed);
+ if (ACPI_SUCCESS(status)) {
+
+ /* Validate/Allocate/Clear caller buffer */
+
+ status = acpi_ut_initialize_buffer(return_buffer,
+ buffer_space_needed);
+ if (ACPI_FAILURE(status)) {
+ /*
+ * Caller's buffer is too small or a new one can't
+ * be allocated
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Needed buffer size %X, %s\n",
+ (u32)buffer_space_needed,
+ acpi_format_exception(status)));
+ } else {
+ /* We have enough space for the object, build it */
+
+ status =
+ acpi_ut_copy_iobject_to_eobject(info->return_object,
+ return_buffer);
}
}
+cleanup_return_object:
+
if (info->return_object) {
/*
* Delete the internal return object. NOTE: Interpreter must be
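
The restructured return path above sizes the caller's buffer, allocates it if requested, and then converts the internal object to an external one. A typical caller-side sketch (not from this patch; "handle" is assumed to be a valid acpi_handle obtained elsewhere) looks like this:

	static acpi_status read_sta(acpi_handle handle, u64 *sta)
	{
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		union acpi_object *obj;
		acpi_status status;

		status = acpi_evaluate_object(handle, "_STA", NULL, &buffer);
		if (ACPI_FAILURE(status))
			return status;

		obj = buffer.pointer;	/* external object built by the path above */
		if (obj && obj->type == ACPI_TYPE_INTEGER)
			*sta = obj->integer.value;

		ACPI_FREE(buffer.pointer);
		return AE_OK;
	}

With ACPI_ALLOCATE_BUFFER, acpi_ut_initialize_buffer() allocates the buffer on the caller's behalf, so the caller owns buffer.pointer and must free it.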
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 285b82044e7b..76a1bd4bb070 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -78,7 +78,7 @@ static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest,
acpi_status
acpi_get_handle(acpi_handle parent,
- acpi_string pathname, acpi_handle * ret_handle)
+ acpi_string pathname, acpi_handle *ret_handle)
{
acpi_status status;
struct acpi_namespace_node *node = NULL;
@@ -155,7 +155,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_handle)
*
******************************************************************************/
acpi_status
-acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
+acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer *buffer)
{
acpi_status status;
struct acpi_namespace_node *node;
@@ -448,7 +448,7 @@ acpi_get_object_info(acpi_handle handle,
/* Point past the CID PNP_DEVICE_ID array */
next_id_string +=
- ((acpi_size) cid_list->count *
+ ((acpi_size)cid_list->count *
sizeof(struct acpi_pnp_device_id));
}
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index c312cd490450..32d372b85243 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -63,7 +63,7 @@ ACPI_MODULE_NAME("nsxfobj")
 * DESCRIPTION: This routine returns the type associated with a particular handle
*
******************************************************************************/
-acpi_status acpi_get_type(acpi_handle handle, acpi_object_type * ret_type)
+acpi_status acpi_get_type(acpi_handle handle, acpi_object_type *ret_type)
{
struct acpi_namespace_node *node;
acpi_status status;
@@ -115,7 +115,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_type)
* Handle.
*
******************************************************************************/
-acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
+acpi_status acpi_get_parent(acpi_handle handle, acpi_handle *ret_handle)
{
struct acpi_namespace_node *node;
struct acpi_namespace_node *parent_node;
@@ -183,7 +183,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_parent)
acpi_status
acpi_get_next_object(acpi_object_type type,
acpi_handle parent,
- acpi_handle child, acpi_handle * ret_handle)
+ acpi_handle child, acpi_handle *ret_handle)
{
acpi_status status;
struct acpi_namespace_node *node;
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index d48cbed342c1..c29c930ffa08 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -87,7 +87,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
* used to encode the package length, either 0,1,2, or 3
*/
byte_count = (aml[0] >> 6);
- parser_state->aml += ((acpi_size) byte_count + 1);
+ parser_state->aml += ((acpi_size)byte_count + 1);
/* Get bytes 3, 2, 1 as needed */
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index cfd17a4f2e91..177b05b239b7 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -158,7 +158,7 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
*
******************************************************************************/
-char *acpi_ps_get_opcode_name(u16 opcode)
+const char *acpi_ps_get_opcode_name(u16 opcode)
{
#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 8038ed2aca05..0a23897d8adf 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -130,8 +130,8 @@ u16 acpi_ps_peek_opcode(struct acpi_parse_state * parser_state)
******************************************************************************/
acpi_status
-acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
- union acpi_parse_object * op)
+acpi_ps_complete_this_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op)
{
union acpi_parse_object *prev;
union acpi_parse_object *next;
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index b28b0da171b6..89cb4bffcc7c 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -128,7 +128,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml)
if (op_info->flags & AML_DEFER) {
flags = ACPI_PARSEOP_DEFERRED;
} else if (op_info->flags & AML_NAMED) {
- flags = ACPI_PARSEOP_NAMED;
+ flags = ACPI_PARSEOP_NAMED_OBJECT;
} else if (opcode == AML_INT_BYTELIST_OP) {
flags = ACPI_PARSEOP_BYTELIST;
}
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 04b37fcca684..cf30cd821f5e 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -115,7 +115,7 @@ acpi_debug_trace(const char *name, u32 debug_level, u32 debug_layer, u32 flags)
*
******************************************************************************/
-acpi_status acpi_ps_execute_method(struct acpi_evaluate_info * info)
+acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
{
acpi_status status;
union acpi_parse_object *op;
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 2b1209d73e44..f1e83addd5b4 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -112,7 +112,7 @@ acpi_rs_struct_option_length(struct acpi_resource_source *resource_source)
* resource_source_index (1).
*/
if (resource_source->string_ptr) {
- return ((acpi_rs_length) (resource_source->string_length + 1));
+ return ((acpi_rs_length)(resource_source->string_length + 1));
}
return (0);
@@ -188,7 +188,7 @@ acpi_rs_stream_option_length(u32 resource_length,
acpi_status
acpi_rs_get_aml_length(struct acpi_resource *resource,
- acpi_size resource_list_size, acpi_size * size_needed)
+ acpi_size resource_list_size, acpi_size *size_needed)
{
acpi_size aml_size_needed = 0;
struct acpi_resource *resource_end;
@@ -278,11 +278,11 @@ acpi_rs_get_aml_length(struct acpi_resource *resource,
* 16-Bit Address Resource:
* Add the size of the optional resource_source info
*/
- total_size = (acpi_rs_length) (total_size +
- acpi_rs_struct_option_length
- (&resource->data.
- address16.
- resource_source));
+ total_size = (acpi_rs_length)(total_size +
+ acpi_rs_struct_option_length
+ (&resource->data.
+ address16.
+ resource_source));
break;
case ACPI_RESOURCE_TYPE_ADDRESS32:
@@ -290,11 +290,11 @@ acpi_rs_get_aml_length(struct acpi_resource *resource,
* 32-Bit Address Resource:
* Add the size of the optional resource_source info
*/
- total_size = (acpi_rs_length) (total_size +
- acpi_rs_struct_option_length
- (&resource->data.
- address32.
- resource_source));
+ total_size = (acpi_rs_length)(total_size +
+ acpi_rs_struct_option_length
+ (&resource->data.
+ address32.
+ resource_source));
break;
case ACPI_RESOURCE_TYPE_ADDRESS64:
@@ -302,11 +302,11 @@ acpi_rs_get_aml_length(struct acpi_resource *resource,
* 64-Bit Address Resource:
* Add the size of the optional resource_source info
*/
- total_size = (acpi_rs_length) (total_size +
- acpi_rs_struct_option_length
- (&resource->data.
- address64.
- resource_source));
+ total_size = (acpi_rs_length)(total_size +
+ acpi_rs_struct_option_length
+ (&resource->data.
+ address64.
+ resource_source));
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
@@ -315,28 +315,28 @@ acpi_rs_get_aml_length(struct acpi_resource *resource,
* Add the size of each additional optional interrupt beyond the
* required 1 (4 bytes for each u32 interrupt number)
*/
- total_size = (acpi_rs_length) (total_size +
- ((resource->data.
- extended_irq.
- interrupt_count -
- 1) * 4) +
- /* Add the size of the optional resource_source info */
- acpi_rs_struct_option_length
- (&resource->data.
+ total_size = (acpi_rs_length)(total_size +
+ ((resource->data.
extended_irq.
- resource_source));
+ interrupt_count -
+ 1) * 4) +
+ /* Add the size of the optional resource_source info */
+ acpi_rs_struct_option_length
+ (&resource->data.
+ extended_irq.
+ resource_source));
break;
case ACPI_RESOURCE_TYPE_GPIO:
- total_size = (acpi_rs_length) (total_size +
- (resource->data.gpio.
- pin_table_length * 2) +
- resource->data.gpio.
- resource_source.
- string_length +
- resource->data.gpio.
- vendor_length);
+ total_size = (acpi_rs_length)(total_size +
+ (resource->data.gpio.
+ pin_table_length * 2) +
+ resource->data.gpio.
+ resource_source.
+ string_length +
+ resource->data.gpio.
+ vendor_length);
break;
@@ -348,14 +348,14 @@ acpi_rs_get_aml_length(struct acpi_resource *resource,
common_serial_bus.
type];
- total_size = (acpi_rs_length) (total_size +
- resource->data.
- i2c_serial_bus.
- resource_source.
- string_length +
- resource->data.
- i2c_serial_bus.
- vendor_length);
+ total_size = (acpi_rs_length)(total_size +
+ resource->data.
+ i2c_serial_bus.
+ resource_source.
+ string_length +
+ resource->data.
+ i2c_serial_bus.
+ vendor_length);
break;
@@ -397,8 +397,8 @@ acpi_rs_get_aml_length(struct acpi_resource *resource,
******************************************************************************/
acpi_status
-acpi_rs_get_list_length(u8 * aml_buffer,
- u32 aml_buffer_length, acpi_size * size_needed)
+acpi_rs_get_list_length(u8 *aml_buffer,
+ u32 aml_buffer_length, acpi_size *size_needed)
{
acpi_status status;
u8 *end_aml;
@@ -610,7 +610,7 @@ acpi_rs_get_list_length(u8 * aml_buffer,
acpi_status
acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
- acpi_size * buffer_size_needed)
+ acpi_size *buffer_size_needed)
{
u32 number_of_elements;
acpi_size temp_size_needed = 0;
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 12978891e842..809b61c114fe 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -347,7 +347,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
(u8 *) output_buffer->pointer);
path_buffer.pointer = user_prt->source;
- status = acpi_ns_handle_to_pathname((acpi_handle) node, &path_buffer, FALSE);
+ status = acpi_ns_handle_to_pathname((acpi_handle)node, &path_buffer, FALSE);
/* +1 to include null terminator */
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 23a17c86d5a9..5ffdb5602d8d 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -52,17 +52,17 @@ ACPI_MODULE_NAME("rsdump")
* All functions in this module are used by the AML Debugger only
*/
/* Local prototypes */
-static void acpi_rs_out_string(char *title, char *value);
+static void acpi_rs_out_string(const char *title, const char *value);
-static void acpi_rs_out_integer8(char *title, u8 value);
+static void acpi_rs_out_integer8(const char *title, u8 value);
-static void acpi_rs_out_integer16(char *title, u16 value);
+static void acpi_rs_out_integer16(const char *title, u16 value);
-static void acpi_rs_out_integer32(char *title, u32 value);
+static void acpi_rs_out_integer32(const char *title, u32 value);
-static void acpi_rs_out_integer64(char *title, u64 value);
+static void acpi_rs_out_integer64(const char *title, u64 value);
-static void acpi_rs_out_title(char *title);
+static void acpi_rs_out_title(const char *title);
static void acpi_rs_dump_byte_list(u16 length, u8 *data);
@@ -208,7 +208,7 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
{
u8 *target = NULL;
u8 *previous_target;
- char *name;
+ const char *name;
u8 count;
/* First table entry must contain the table length (# of table entries) */
@@ -248,10 +248,8 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
case ACPI_RSD_UINT8:
if (table->pointer) {
- acpi_rs_out_string(name, ACPI_CAST_PTR(char,
- table->
- pointer
- [*target]));
+ acpi_rs_out_string(name,
+ table->pointer[*target]);
} else {
acpi_rs_out_integer8(name, ACPI_GET8(target));
}
@@ -276,26 +274,20 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
case ACPI_RSD_1BITFLAG:
- acpi_rs_out_string(name, ACPI_CAST_PTR(char,
- table->
- pointer[*target &
- 0x01]));
+ acpi_rs_out_string(name,
+ table->pointer[*target & 0x01]);
break;
case ACPI_RSD_2BITFLAG:
- acpi_rs_out_string(name, ACPI_CAST_PTR(char,
- table->
- pointer[*target &
- 0x03]));
+ acpi_rs_out_string(name,
+ table->pointer[*target & 0x03]);
break;
case ACPI_RSD_3BITFLAG:
- acpi_rs_out_string(name, ACPI_CAST_PTR(char,
- table->
- pointer[*target &
- 0x07]));
+ acpi_rs_out_string(name,
+ table->pointer[*target & 0x07]);
break;
case ACPI_RSD_SHORTLIST:
@@ -481,7 +473,7 @@ static void acpi_rs_dump_address_common(union acpi_resource_data *resource)
*
******************************************************************************/
-static void acpi_rs_out_string(char *title, char *value)
+static void acpi_rs_out_string(const char *title, const char *value)
{
acpi_os_printf("%27s : %s", title, value);
@@ -491,30 +483,30 @@ static void acpi_rs_out_string(char *title, char *value)
acpi_os_printf("\n");
}
-static void acpi_rs_out_integer8(char *title, u8 value)
+static void acpi_rs_out_integer8(const char *title, u8 value)
{
acpi_os_printf("%27s : %2.2X\n", title, value);
}
-static void acpi_rs_out_integer16(char *title, u16 value)
+static void acpi_rs_out_integer16(const char *title, u16 value)
{
acpi_os_printf("%27s : %4.4X\n", title, value);
}
-static void acpi_rs_out_integer32(char *title, u32 value)
+static void acpi_rs_out_integer32(const char *title, u32 value)
{
acpi_os_printf("%27s : %8.8X\n", title, value);
}
-static void acpi_rs_out_integer64(char *title, u64 value)
+static void acpi_rs_out_integer64(const char *title, u64 value)
{
acpi_os_printf("%27s : %8.8X%8.8X\n", title, ACPI_FORMAT_UINT64(value));
}
-static void acpi_rs_out_title(char *title)
+static void acpi_rs_out_title(const char *title)
{
acpi_os_printf("%27s : ", title);
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
index 5c3491387f9f..61e8f16c857d 100644
--- a/drivers/acpi/acpica/rsdumpinfo.c
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -330,19 +330,20 @@ struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
{ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type), "Type", acpi_gbl_sbt_decode}, \
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, \
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode), "SlaveMode", acpi_gbl_sm_decode}, \
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.connection_sharing),"ConnectionSharing", acpi_gbl_shr_decode}, \
{ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId", NULL}, \
{ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength", NULL}, \
{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource", NULL}, \
{ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.vendor_length), "VendorLength", NULL}, \
{ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data), "VendorData", NULL},
-struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
+struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[11] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
"Common Serial Bus", NULL},
ACPI_RS_DUMP_COMMON_SERIAL_BUS
};
-struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
+struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[14] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
"I2C Serial Bus", NULL},
ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
@@ -355,7 +356,7 @@ struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
"SlaveAddress", NULL},
};
-struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
+struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[18] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
"Spi Serial Bus", NULL},
ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
@@ -376,7 +377,7 @@ struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
"ConnectionSpeed", NULL},
};
-struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
+struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[20] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
"Uart Serial Bus", NULL},
ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index ce3d0b77ec89..25165ca42051 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -87,7 +87,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- if (((acpi_size) resource) & 0x3) {
+ if (((acpi_size)resource) & 0x3) {
/* Each internal resource struct is expected to be 32-bit aligned */
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index 8a01296ac7cf..b82c061f205a 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -151,7 +151,7 @@ struct acpi_rsconvert_info acpi_rs_convert_gpio[18] = {
*
******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = {
+struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[17] = {
{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
ACPI_RSC_TABLE_SIZE(acpi_rs_convert_i2c_serial_bus)},
@@ -177,6 +177,11 @@ struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = {
AML_OFFSET(common_serial_bus.flags),
1},
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.common_serial_bus.connection_sharing),
+ AML_OFFSET(common_serial_bus.flags),
+ 2},
+
{ACPI_RSC_MOVE8,
ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
AML_OFFSET(common_serial_bus.type_revision_id),
@@ -237,7 +242,7 @@ struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = {
*
******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = {
+struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[21] = {
{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
ACPI_RSC_TABLE_SIZE(acpi_rs_convert_spi_serial_bus)},
@@ -263,6 +268,11 @@ struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = {
AML_OFFSET(common_serial_bus.flags),
1},
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.common_serial_bus.connection_sharing),
+ AML_OFFSET(common_serial_bus.flags),
+ 2},
+
{ACPI_RSC_MOVE8,
ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
AML_OFFSET(common_serial_bus.type_revision_id),
@@ -339,7 +349,7 @@ struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = {
*
******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[22] = {
+struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[23] = {
{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
ACPI_RSC_TABLE_SIZE(acpi_rs_convert_uart_serial_bus)},
@@ -365,6 +375,11 @@ struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[22] = {
AML_OFFSET(common_serial_bus.flags),
1},
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.common_serial_bus.connection_sharing),
+ AML_OFFSET(common_serial_bus.flags),
+ 2},
+
{ACPI_RSC_MOVE8,
ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
AML_OFFSET(common_serial_bus.type_revision_id),
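
Each of the three conversion tables gains an ACPI_RSC_1BITFLAG entry that copies bit 2 of the serial-bus Flags byte into the new connection_sharing member. In plain C the extraction amounts to the following (illustrative helper, not part of the patch):

	#include <stdint.h>

	static uint8_t get_connection_sharing(uint8_t serial_bus_flags)
	{
		return (serial_bus_flags >> 2) & 0x01;	/* bit 2: sharing allowed */
	}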
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index cf06e49cd91c..fa491c64c040 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -338,7 +338,7 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
* Note: Some resource descriptors will have an additional null, so
* we add 1 to the minimum length.
*/
- if (total_length > (acpi_rsdesc_size) (minimum_length + 1)) {
+ if (total_length > (acpi_rsdesc_size)(minimum_length + 1)) {
/* Get the resource_source_index */
@@ -377,7 +377,7 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
ACPI_CAST_PTR(char,
&aml_resource_source[1]));
- return ((acpi_rs_length) total_length);
+ return ((acpi_rs_length)total_length);
}
/* resource_source is not present */
@@ -406,9 +406,9 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
******************************************************************************/
acpi_rsdesc_size
-acpi_rs_set_resource_source(union aml_resource * aml,
+acpi_rs_set_resource_source(union aml_resource *aml,
acpi_rs_length minimum_length,
- struct acpi_resource_source * resource_source)
+ struct acpi_resource_source *resource_source)
{
u8 *aml_resource_source;
acpi_rsdesc_size descriptor_length;
@@ -466,8 +466,8 @@ acpi_rs_set_resource_source(union aml_resource * aml,
******************************************************************************/
acpi_status
-acpi_rs_get_prt_method_data(struct acpi_namespace_node * node,
- struct acpi_buffer * ret_buffer)
+acpi_rs_get_prt_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer)
{
union acpi_operand_object *obj_desc;
acpi_status status;
@@ -671,7 +671,7 @@ acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
acpi_status
acpi_rs_get_method_data(acpi_handle handle,
- char *path, struct acpi_buffer *ret_buffer)
+ const char *path, struct acpi_buffer *ret_buffer)
{
union acpi_operand_object *obj_desc;
acpi_status status;
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 900933be9909..465ed8137167 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -433,8 +433,8 @@ ACPI_EXPORT_SYMBOL(acpi_resource_to_address64)
acpi_status
acpi_get_vendor_resource(acpi_handle device_handle,
char *name,
- struct acpi_vendor_uuid * uuid,
- struct acpi_buffer * ret_buffer)
+ struct acpi_vendor_uuid *uuid,
+ struct acpi_buffer *ret_buffer)
{
struct acpi_vendor_walk_info info;
acpi_status status;
@@ -539,7 +539,7 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
******************************************************************************/
acpi_status
-acpi_walk_resource_buffer(struct acpi_buffer * buffer,
+acpi_walk_resource_buffer(struct acpi_buffer *buffer,
acpi_walk_resource_callback user_function,
void *context)
{
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 7da79ce74080..1388a19e5db8 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -368,7 +368,7 @@ acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc)
*****************************************************************************/
acpi_status
-acpi_tb_verify_temp_table(struct acpi_table_desc * table_desc, char *signature)
+acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature)
{
acpi_status status = AE_OK;
@@ -401,9 +401,9 @@ acpi_tb_verify_temp_table(struct acpi_table_desc * table_desc, char *signature)
ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
"%4.4s 0x%8.8X%8.8X"
" Attempted table install failed",
- acpi_ut_valid_acpi_name(table_desc->
- signature.
- ascii) ?
+ acpi_ut_valid_nameseg(table_desc->
+ signature.
+ ascii) ?
table_desc->signature.ascii : "????",
ACPI_FORMAT_UINT64(table_desc->
address)));
@@ -454,7 +454,7 @@ acpi_status acpi_tb_resize_root_table_list(void)
table_count = acpi_gbl_root_table_list.current_table_count;
}
- tables = ACPI_ALLOCATE_ZEROED(((acpi_size) table_count +
+ tables = ACPI_ALLOCATE_ZEROED(((acpi_size)table_count +
ACPI_ROOT_TABLE_SIZE_INCREMENT) *
sizeof(struct acpi_table_desc));
if (!tables) {
@@ -467,8 +467,7 @@ acpi_status acpi_tb_resize_root_table_list(void)
if (acpi_gbl_root_table_list.tables) {
memcpy(tables, acpi_gbl_root_table_list.tables,
- (acpi_size) table_count *
- sizeof(struct acpi_table_desc));
+ (acpi_size)table_count * sizeof(struct acpi_table_desc));
if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
ACPI_FREE(acpi_gbl_root_table_list.tables);
@@ -701,7 +700,7 @@ acpi_status acpi_tb_release_owner_id(u32 table_index)
*
******************************************************************************/
-acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id * owner_id)
+acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id)
{
acpi_status status = AE_BAD_PARAMETER;
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index a79e4f30b530..620806965243 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -53,7 +53,7 @@ static void
acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
u8 space_id,
u8 byte_width,
- u64 address, char *register_name, u8 flags);
+ u64 address, const char *register_name, u8 flags);
static void acpi_tb_convert_fadt(void);
@@ -65,7 +65,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64);
/* Table for conversion of FADT to common internal format and FADT validation */
typedef struct acpi_fadt_info {
- char *name;
+ const char *name;
u16 address64;
u16 address32;
u16 length;
@@ -192,7 +192,7 @@ static void
acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
u8 space_id,
u8 byte_width,
- u64 address, char *register_name, u8 flags)
+ u64 address, const char *register_name, u8 flags)
{
u8 bit_width;
@@ -344,7 +344,7 @@ void acpi_tb_parse_fadt(void)
/* Obtain the DSDT and FACS tables via their addresses within the FADT */
- acpi_tb_install_fixed_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
+ acpi_tb_install_fixed_table((acpi_physical_address)acpi_gbl_FADT.Xdsdt,
ACPI_SIG_DSDT, &acpi_gbl_dsdt_index);
/* If Hardware Reduced flag is set, there is no FACS */
@@ -385,14 +385,15 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
{
/*
* Check if the FADT is larger than the largest table that we expect
- * (the ACPI 5.0 version). If so, truncate the table, and issue
- * a warning.
+ * (typically the current ACPI specification version). If so, truncate
+ * the table, and issue a warning.
*/
if (length > sizeof(struct acpi_table_fadt)) {
ACPI_BIOS_WARNING((AE_INFO,
- "FADT (revision %u) is longer than ACPI 5.0 version, "
+ "FADT (revision %u) is longer than %s length, "
"truncating length %u to %u",
- table->revision, length,
+ table->revision, ACPI_FADT_CONFORMANCE,
+ length,
(u32)sizeof(struct acpi_table_fadt)));
}
@@ -467,7 +468,7 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
static void acpi_tb_convert_fadt(void)
{
- char *name;
+ const char *name;
struct acpi_generic_address *address64;
u32 address32;
u8 length;
@@ -646,9 +647,12 @@ static void acpi_tb_convert_fadt(void)
if ((address64->address && !length) ||
(!address64->address && length)) {
ACPI_BIOS_WARNING((AE_INFO,
- "Optional FADT field %s has zero address or length: "
- "0x%8.8X%8.8X/0x%X",
- name,
+ "Optional FADT field %s has valid %s but zero %s: "
+ "0x%8.8X%8.8X/0x%X", name,
+ (length ? "Length" :
+ "Address"),
+ (length ? "Address" :
+ "Length"),
ACPI_FORMAT_UINT64
(address64->address),
length));
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index f2d08034630e..e348d616e60f 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -76,7 +76,7 @@ acpi_tb_find_table(char *signature,
/* Validate the input table signature */
- if (!acpi_is_valid_signature(signature)) {
+ if (!acpi_ut_valid_nameseg(signature)) {
return_ACPI_STATUS(AE_BAD_SIGNATURE);
}
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 4dc6108de4ff..8b13052128fc 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -299,9 +299,9 @@ acpi_tb_install_standard_table(acpi_physical_address address,
ACPI_BIOS_ERROR((AE_INFO,
"Table has invalid signature [%4.4s] (0x%8.8X), "
"must be SSDT or OEMx",
- acpi_ut_valid_acpi_name(new_table_desc.
- signature.
- ascii) ?
+ acpi_ut_valid_nameseg(new_table_desc.
+ signature.
+ ascii) ?
new_table_desc.signature.
ascii : "????",
new_table_desc.signature.integer));
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 9240c76d2823..e28553914bf5 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -231,7 +231,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
ACPI_FORMAT_UINT64(address64)));
}
#endif
- return ((acpi_physical_address) (address64));
+ return ((acpi_physical_address)(address64));
}
}
@@ -287,12 +287,12 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
* the XSDT if the revision is > 1 and the XSDT pointer is present,
* as per the ACPI specification.
*/
- address = (acpi_physical_address) rsdp->xsdt_physical_address;
+ address = (acpi_physical_address)rsdp->xsdt_physical_address;
table_entry_size = ACPI_XSDT_ENTRY_SIZE;
} else {
/* Root table is an RSDT (32-bit physical addresses) */
- address = (acpi_physical_address) rsdp->rsdt_physical_address;
+ address = (acpi_physical_address)rsdp->rsdt_physical_address;
table_entry_size = ACPI_RSDT_ENTRY_SIZE;
}
@@ -380,30 +380,3 @@ next_table:
acpi_os_unmap_memory(table, length);
return_ACPI_STATUS(AE_OK);
}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_is_valid_signature
- *
- * PARAMETERS: signature - Sig string to be validated
- *
- * RETURN: TRUE if signature is has 4 valid ACPI characters
- *
- * DESCRIPTION: Validate an ACPI table signature.
- *
- ******************************************************************************/
-
-u8 acpi_is_valid_signature(char *signature)
-{
- u32 i;
-
- /* Validate each character in the signature */
-
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
- if (!acpi_ut_valid_acpi_char(signature[i], i)) {
- return (FALSE);
- }
- }
-
- return (TRUE);
-}
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 326df65decef..3ecec937e8c9 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -99,7 +99,7 @@ acpi_status acpi_allocate_root_table(u32 initial_table_count)
******************************************************************************/
acpi_status __init
-acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
+acpi_initialize_tables(struct acpi_table_desc *initial_table_array,
u32 initial_table_count, u8 allow_resize)
{
acpi_physical_address rsdp_address;
@@ -120,7 +120,7 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
/* Root Table Array has been statically allocated by the host */
memset(initial_table_array, 0,
- (acpi_size) initial_table_count *
+ (acpi_size)initial_table_count *
sizeof(struct acpi_table_desc));
acpi_gbl_root_table_list.tables = initial_table_array;
@@ -352,7 +352,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table)
*
******************************************************************************/
acpi_status
-acpi_get_table_by_index(u32 table_index, struct acpi_table_header ** table)
+acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 3151968c10d1..ac71abcd32bb 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -82,7 +82,7 @@ acpi_status __init acpi_load_tables(void)
* their customized default region handlers.
*/
status = acpi_ev_install_region_handlers();
- if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
+ if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"During Region initialization"));
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index b9a78e457d19..adb6cfc54661 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -90,7 +90,7 @@ u32 acpi_tb_get_rsdp_length(struct acpi_table_rsdp *rsdp)
*
******************************************************************************/
-acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp * rsdp)
+acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
{
/*
@@ -142,7 +142,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp * rsdp)
*
******************************************************************************/
-acpi_status __init acpi_find_root_pointer(acpi_physical_address * table_address)
+acpi_status __init acpi_find_root_pointer(acpi_physical_address *table_address)
{
u8 *table_ptr;
u8 *mem_rover;
@@ -201,7 +201,7 @@ acpi_status __init acpi_find_root_pointer(acpi_physical_address * table_address)
(u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
*table_address =
- (acpi_physical_address) physical_address;
+ (acpi_physical_address)physical_address;
return_ACPI_STATUS(AE_OK);
}
}
@@ -234,7 +234,7 @@ acpi_status __init acpi_find_root_pointer(acpi_physical_address * table_address)
(ACPI_HI_RSDP_WINDOW_BASE +
ACPI_PTR_DIFF(mem_rover, table_ptr));
- *table_address = (acpi_physical_address) physical_address;
+ *table_address = (acpi_physical_address)physical_address;
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 3dbdc3ab8b78..13324a27b99b 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -231,7 +231,7 @@ acpi_status acpi_ut_delete_caches(void)
*
******************************************************************************/
-acpi_status acpi_ut_validate_buffer(struct acpi_buffer * buffer)
+acpi_status acpi_ut_validate_buffer(struct acpi_buffer *buffer)
{
/* Obviously, the structure pointer must be valid */
@@ -272,8 +272,7 @@ acpi_status acpi_ut_validate_buffer(struct acpi_buffer * buffer)
******************************************************************************/
acpi_status
-acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
- acpi_size required_length)
+acpi_ut_initialize_buffer(struct acpi_buffer *buffer, acpi_size required_length)
{
acpi_size input_buffer_length;
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
new file mode 100644
index 000000000000..706c1f346490
--- /dev/null
+++ b/drivers/acpi/acpica/utascii.c
@@ -0,0 +1,140 @@
+/******************************************************************************
+ *
+ * Module Name: utascii - Utility ascii functions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2016, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_valid_nameseg
+ *
+ * PARAMETERS: name - The name or table signature to be examined.
+ * Four characters, does not have to be a
+ * NULL terminated string.
+ *
+ * RETURN:      TRUE if the signature has 4 valid ACPI characters
+ *
+ * DESCRIPTION: Validate an ACPI table signature.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_nameseg(char *name)
+{
+ u32 i;
+
+ /* Validate each character in the signature */
+
+ for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ if (!acpi_ut_valid_name_char(name[i], i)) {
+ return (FALSE);
+ }
+ }
+
+ return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_valid_name_char
+ *
+ * PARAMETERS: char - The character to be examined
+ * position - Byte position (0-3)
+ *
+ * RETURN: TRUE if the character is valid, FALSE otherwise
+ *
+ * DESCRIPTION: Check for a valid ACPI character. Must be one of:
+ * 1) Upper case alpha
+ * 2) numeric
+ * 3) underscore
+ *
+ * We allow a '!' as the last character because of the ASF! table
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_name_char(char character, u32 position)
+{
+
+ if (!((character >= 'A' && character <= 'Z') ||
+ (character >= '0' && character <= '9') || (character == '_'))) {
+
+ /* Allow a '!' in the last position */
+
+ if (character == '!' && position == 3) {
+ return (TRUE);
+ }
+
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_check_and_repair_ascii
+ *
+ * PARAMETERS: name - Ascii string
+ * count - Number of characters to check
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Ensure that the requested number of characters are printable
+ *              Ascii characters. Non-printable characters are set to <space>;
+ *              conversion stops at the first null byte.
+ *
+ ******************************************************************************/
+
+void acpi_ut_check_and_repair_ascii(u8 *name, char *repaired_name, u32 count)
+{
+ u32 i;
+
+ for (i = 0; i < count; i++) {
+ repaired_name[i] = (char)name[i];
+
+ if (!name[i]) {
+ return;
+ }
+ if (!isprint(name[i])) {
+ repaired_name[i] = ' ';
+ }
+ }
+}
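The three helpers above are small enough to lift out of their ACPICA surroundings. A standalone sketch of the validation rules they implement; the names below are illustrative stand-ins for the real acpi_ut_* functions, not part of the patch:

#include <stdio.h>

#define NAME_SIZE 4                     /* mirrors ACPI_NAME_SIZE */

/* Same rule as acpi_ut_valid_name_char: upper-case alpha, digit,
 * underscore, or a '!' in the last position (for the ASF! table). */
static int valid_name_char(char c, unsigned int pos)
{
        if ((c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_')
                return 1;

        return (c == '!' && pos == NAME_SIZE - 1);
}

/* Same loop as acpi_ut_valid_nameseg: all four bytes must pass. */
static int valid_nameseg(const char *name)
{
        unsigned int i;

        for (i = 0; i < NAME_SIZE; i++) {
                if (!valid_name_char(name[i], i))
                        return 0;
        }

        return 1;
}

int main(void)
{
        printf("%d %d %d\n",
               valid_nameseg("_SB_"),   /* 1: valid name segment */
               valid_nameseg("ASF!"),   /* 1: '!' allowed at position 3 */
               valid_nameseg("abcd"));  /* 0: lower case is rejected */
        return 0;
}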
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 0cfb2b8edad5..bd31faf5da7c 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -106,31 +106,31 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
default: /* Default is BYTE display */
acpi_os_printf("%02X ",
- buffer[(acpi_size) i + j]);
+ buffer[(acpi_size)i + j]);
break;
case DB_WORD_DISPLAY:
ACPI_MOVE_16_TO_32(&temp32,
- &buffer[(acpi_size) i + j]);
+ &buffer[(acpi_size)i + j]);
acpi_os_printf("%04X ", temp32);
break;
case DB_DWORD_DISPLAY:
ACPI_MOVE_32_TO_32(&temp32,
- &buffer[(acpi_size) i + j]);
+ &buffer[(acpi_size)i + j]);
acpi_os_printf("%08X ", temp32);
break;
case DB_QWORD_DISPLAY:
ACPI_MOVE_32_TO_32(&temp32,
- &buffer[(acpi_size) i + j]);
+ &buffer[(acpi_size)i + j]);
acpi_os_printf("%08X", temp32);
ACPI_MOVE_32_TO_32(&temp32,
- &buffer[(acpi_size) i + j +
+ &buffer[(acpi_size)i + j +
4]);
acpi_os_printf("%08X ", temp32);
break;
@@ -158,7 +158,7 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
acpi_os_printf("// ");
}
- buf_char = buffer[(acpi_size) i + j];
+ buf_char = buffer[(acpi_size)i + j];
if (isprint(buf_char)) {
acpi_os_printf("%c", buf_char);
} else {
@@ -274,31 +274,31 @@ acpi_ut_dump_buffer_to_file(ACPI_FILE file,
default: /* Default is BYTE display */
acpi_ut_file_printf(file, "%02X ",
- buffer[(acpi_size) i + j]);
+ buffer[(acpi_size)i + j]);
break;
case DB_WORD_DISPLAY:
ACPI_MOVE_16_TO_32(&temp32,
- &buffer[(acpi_size) i + j]);
+ &buffer[(acpi_size)i + j]);
acpi_ut_file_printf(file, "%04X ", temp32);
break;
case DB_DWORD_DISPLAY:
ACPI_MOVE_32_TO_32(&temp32,
- &buffer[(acpi_size) i + j]);
+ &buffer[(acpi_size)i + j]);
acpi_ut_file_printf(file, "%08X ", temp32);
break;
case DB_QWORD_DISPLAY:
ACPI_MOVE_32_TO_32(&temp32,
- &buffer[(acpi_size) i + j]);
+ &buffer[(acpi_size)i + j]);
acpi_ut_file_printf(file, "%08X", temp32);
ACPI_MOVE_32_TO_32(&temp32,
- &buffer[(acpi_size) i + j +
+ &buffer[(acpi_size)i + j +
4]);
acpi_ut_file_printf(file, "%08X ", temp32);
break;
@@ -318,7 +318,7 @@ acpi_ut_dump_buffer_to_file(ACPI_FILE file,
return;
}
- buf_char = buffer[(acpi_size) i + j];
+ buf_char = buffer[(acpi_size)i + j];
if (isprint(buf_char)) {
acpi_ut_file_printf(file, "%c", buf_char);
} else {
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index f8e9978888e1..3b8d23ef351f 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -105,7 +105,7 @@ acpi_os_create_cache(char *cache_name,
*
******************************************************************************/
-acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
+acpi_status acpi_os_purge_cache(struct acpi_memory_list *cache)
{
void *next;
acpi_status status;
@@ -151,7 +151,7 @@ acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
*
******************************************************************************/
-acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache)
+acpi_status acpi_os_delete_cache(struct acpi_memory_list *cache)
{
acpi_status status;
@@ -184,8 +184,7 @@ acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache)
*
******************************************************************************/
-acpi_status
-acpi_os_release_object(struct acpi_memory_list * cache, void *object)
+acpi_status acpi_os_release_object(struct acpi_memory_list *cache, void *object)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 98d53e59ce55..82f971402d85 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("utcopy")
static acpi_status
acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
union acpi_object *external_object,
- u8 * data_space, acpi_size * buffer_space_used);
+ u8 *data_space, acpi_size *buffer_space_used);
static acpi_status
acpi_ut_copy_ielement_to_ielement(u8 object_type,
@@ -63,7 +63,7 @@ acpi_ut_copy_ielement_to_ielement(u8 object_type,
static acpi_status
acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
- u8 * buffer, acpi_size * space_used);
+ u8 *buffer, acpi_size *space_used);
static acpi_status
acpi_ut_copy_esimple_to_isimple(union acpi_object *user_obj,
@@ -111,7 +111,7 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
static acpi_status
acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
union acpi_object *external_object,
- u8 * data_space, acpi_size * buffer_space_used)
+ u8 *data_space, acpi_size *buffer_space_used)
{
acpi_status status = AE_OK;
@@ -151,7 +151,7 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
memcpy((void *)data_space,
(void *)internal_object->string.pointer,
- (acpi_size) internal_object->string.length + 1);
+ (acpi_size)internal_object->string.length + 1);
break;
case ACPI_TYPE_BUFFER:
@@ -331,7 +331,7 @@ acpi_ut_copy_ielement_to_eelement(u8 object_type,
static acpi_status
acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
- u8 * buffer, acpi_size * space_used)
+ u8 *buffer, acpi_size *space_used)
{
union acpi_object *external_object;
acpi_status status;
@@ -362,7 +362,7 @@ acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
* Leave room for an array of ACPI_OBJECTS in the buffer
* and move the free space past it
*/
- info.length += (acpi_size) external_object->package.count *
+ info.length += (acpi_size)external_object->package.count *
ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
info.free_space += external_object->package.count *
ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
@@ -738,7 +738,7 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
*/
if (source_desc->string.pointer) {
dest_desc->string.pointer =
- ACPI_ALLOCATE((acpi_size) source_desc->string.
+ ACPI_ALLOCATE((acpi_size)source_desc->string.
length + 1);
if (!dest_desc->string.pointer) {
return (AE_NO_MEMORY);
@@ -748,7 +748,7 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
memcpy(dest_desc->string.pointer,
source_desc->string.pointer,
- (acpi_size) source_desc->string.length + 1);
+ (acpi_size)source_desc->string.length + 1);
}
break;
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 1cfc5f69b033..574422205005 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -51,13 +51,9 @@
ACPI_MODULE_NAME("utdebug")
#ifdef ACPI_DEBUG_OUTPUT
-static acpi_thread_id acpi_gbl_prev_thread_id = (acpi_thread_id) 0xFFFFFFFF;
-static char *acpi_gbl_fn_entry_str = "----Entry";
-static char *acpi_gbl_fn_exit_str = "----Exit-";
-
-/* Local prototypes */
-
-static const char *acpi_ut_trim_function_name(const char *function_name);
+static acpi_thread_id acpi_gbl_previous_thread_id = (acpi_thread_id) 0xFFFFFFFF;
+static const char *acpi_gbl_function_entry_prefix = "----Entry";
+static const char *acpi_gbl_function_exit_prefix = "----Exit-";
/*******************************************************************************
*
@@ -178,14 +174,14 @@ acpi_debug_print(u32 requested_debug_level,
* Thread tracking and context switch notification
*/
thread_id = acpi_os_get_thread_id();
- if (thread_id != acpi_gbl_prev_thread_id) {
+ if (thread_id != acpi_gbl_previous_thread_id) {
if (ACPI_LV_THREADS & acpi_dbg_level) {
acpi_os_printf
("\n**** Context Switch from TID %u to TID %u ****\n\n",
- (u32)acpi_gbl_prev_thread_id, (u32)thread_id);
+ (u32)acpi_gbl_previous_thread_id, (u32)thread_id);
}
- acpi_gbl_prev_thread_id = thread_id;
+ acpi_gbl_previous_thread_id = thread_id;
acpi_gbl_nesting_level = 0;
}
@@ -287,7 +283,8 @@ acpi_ut_trace(u32 line_number,
if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name, module_name,
- component_id, "%s\n", acpi_gbl_fn_entry_str);
+ component_id, "%s\n",
+ acpi_gbl_function_entry_prefix);
}
}
@@ -312,7 +309,8 @@ ACPI_EXPORT_SYMBOL(acpi_ut_trace)
void
acpi_ut_trace_ptr(u32 line_number,
const char *function_name,
- const char *module_name, u32 component_id, void *pointer)
+ const char *module_name,
+ u32 component_id, const void *pointer)
{
acpi_gbl_nesting_level++;
@@ -323,8 +321,8 @@ acpi_ut_trace_ptr(u32 line_number,
if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name, module_name,
- component_id, "%s %p\n", acpi_gbl_fn_entry_str,
- pointer);
+ component_id, "%s %p\n",
+ acpi_gbl_function_entry_prefix, pointer);
}
}
@@ -348,7 +346,7 @@ acpi_ut_trace_ptr(u32 line_number,
void
acpi_ut_trace_str(u32 line_number,
const char *function_name,
- const char *module_name, u32 component_id, char *string)
+ const char *module_name, u32 component_id, const char *string)
{
acpi_gbl_nesting_level++;
@@ -359,8 +357,8 @@ acpi_ut_trace_str(u32 line_number,
if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name, module_name,
- component_id, "%s %s\n", acpi_gbl_fn_entry_str,
- string);
+ component_id, "%s %s\n",
+ acpi_gbl_function_entry_prefix, string);
}
}
@@ -396,7 +394,7 @@ acpi_ut_trace_u32(u32 line_number,
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name, module_name,
component_id, "%s %08X\n",
- acpi_gbl_fn_entry_str, integer);
+ acpi_gbl_function_entry_prefix, integer);
}
}
@@ -427,7 +425,8 @@ acpi_ut_exit(u32 line_number,
if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name, module_name,
- component_id, "%s\n", acpi_gbl_fn_exit_str);
+ component_id, "%s\n",
+ acpi_gbl_function_exit_prefix);
}
if (acpi_gbl_nesting_level) {
@@ -467,14 +466,14 @@ acpi_ut_status_exit(u32 line_number,
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name,
module_name, component_id, "%s %s\n",
- acpi_gbl_fn_exit_str,
+ acpi_gbl_function_exit_prefix,
acpi_format_exception(status));
} else {
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name,
module_name, component_id,
"%s ****Exception****: %s\n",
- acpi_gbl_fn_exit_str,
+ acpi_gbl_function_exit_prefix,
acpi_format_exception(status));
}
}
@@ -514,7 +513,7 @@ acpi_ut_value_exit(u32 line_number,
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name, module_name,
component_id, "%s %8.8X%8.8X\n",
- acpi_gbl_fn_exit_str,
+ acpi_gbl_function_exit_prefix,
ACPI_FORMAT_UINT64(value));
}
@@ -552,8 +551,8 @@ acpi_ut_ptr_exit(u32 line_number,
if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name, module_name,
- component_id, "%s %p\n", acpi_gbl_fn_exit_str,
- ptr);
+ component_id, "%s %p\n",
+ acpi_gbl_function_exit_prefix, ptr);
}
if (acpi_gbl_nesting_level) {
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 6ba65b02550c..efd7988e34cb 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -446,7 +446,7 @@ const char *acpi_ut_get_mutex_name(u32 mutex_id)
/* Names for Notify() values, used for debug output */
-static const char *acpi_gbl_generic_notify[ACPI_NOTIFY_MAX + 1] = {
+static const char *acpi_gbl_generic_notify[ACPI_GENERIC_NOTIFY_MAX + 1] = {
/* 00 */ "Bus Check",
/* 01 */ "Device Check",
/* 02 */ "Device Wake",
@@ -459,49 +459,53 @@ static const char *acpi_gbl_generic_notify[ACPI_NOTIFY_MAX + 1] = {
/* 09 */ "Device PLD Check",
/* 0A */ "Reserved",
/* 0B */ "System Locality Update",
- /* 0C */ "Shutdown Request",
+ /* 0C */ "Shutdown Request",
+ /* Reserved in ACPI 6.0 */
/* 0D */ "System Resource Affinity Update"
};
-static const char *acpi_gbl_device_notify[4] = {
+static const char *acpi_gbl_device_notify[5] = {
/* 80 */ "Status Change",
/* 81 */ "Information Change",
/* 82 */ "Device-Specific Change",
- /* 83 */ "Device-Specific Change"
+ /* 83 */ "Device-Specific Change",
+ /* 84 */ "Reserved"
};
-static const char *acpi_gbl_processor_notify[4] = {
+static const char *acpi_gbl_processor_notify[5] = {
/* 80 */ "Performance Capability Change",
/* 81 */ "C-State Change",
/* 82 */ "Throttling Capability Change",
- /* 83 */ "Device-Specific Change"
+ /* 83 */ "Guaranteed Change",
+ /* 84 */ "Minimum Excursion"
};
-static const char *acpi_gbl_thermal_notify[4] = {
+static const char *acpi_gbl_thermal_notify[5] = {
/* 80 */ "Thermal Status Change",
/* 81 */ "Thermal Trip Point Change",
/* 82 */ "Thermal Device List Change",
- /* 83 */ "Thermal Relationship Change"
+ /* 83 */ "Thermal Relationship Change",
+ /* 84 */ "Reserved"
};
const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type)
{
- /* 00 - 0D are common to all object types */
+ /* 00 - 0D are "common to all object types" (from ACPI Spec) */
- if (notify_value <= ACPI_NOTIFY_MAX) {
+ if (notify_value <= ACPI_GENERIC_NOTIFY_MAX) {
return (acpi_gbl_generic_notify[notify_value]);
}
- /* 0D - 7F are reserved */
+ /* 0E - 7F are reserved */
if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
return ("Reserved");
}
- /* 80 - 83 are per-object-type */
+ /* 80 - 84 are per-object-type */
- if (notify_value <= 0x83) {
+ if (notify_value <= ACPI_SPECIFIC_NOTIFY_MAX) {
switch (type) {
case ACPI_TYPE_ANY:
case ACPI_TYPE_DEVICE:
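The lookup in acpi_ut_get_notify_name() now splits the value space into three ranges: 00-0D generic, 0E-7F reserved, 80-84 per-object-type. A cut-down standalone sketch of that dispatch; the tables are abbreviated and only the device table is reproduced, so the strings returned for the missing entries are placeholders:

#include <stdio.h>

#define GENERIC_NOTIFY_MAX      0x0D    /* mirrors ACPI_GENERIC_NOTIFY_MAX */
#define MAX_SYS_NOTIFY          0x7F    /* mirrors ACPI_MAX_SYS_NOTIFY */
#define SPECIFIC_NOTIFY_MAX     0x84    /* mirrors ACPI_SPECIFIC_NOTIFY_MAX */

/* Only the first few generic names are filled in here. */
static const char *generic_notify[GENERIC_NOTIFY_MAX + 1] = {
        "Bus Check", "Device Check", "Device Wake"
};

/* Per-type names for 0x80-0x84; only the device table is shown. */
static const char *device_notify[5] = {
        "Status Change", "Information Change", "Device-Specific Change",
        "Device-Specific Change", "Reserved"
};

static const char *notify_name(unsigned int value)
{
        if (value <= GENERIC_NOTIFY_MAX) {      /* 00-0D: common to all types */
                const char *name = generic_notify[value];
                return name ? name : "Reserved";
        }
        if (value <= MAX_SYS_NOTIFY)            /* 0E-7F: reserved */
                return "Reserved";
        if (value <= SPECIFIC_NOTIFY_MAX)       /* 80-84: per object type */
                return device_notify[value - 0x80];
        return "Hardware/Device Specific";      /* 85-FF */
}

int main(void)
{
        printf("%s / %s / %s\n",
               notify_name(0x01), notify_name(0x20), notify_name(0x83));
        return 0;
}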
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 17b9f3e6e1e1..7bad13f2e518 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -69,7 +69,7 @@ ACPI_MODULE_NAME("uteval")
acpi_status
acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
- char *path,
+ const char *path,
u32 expected_return_btypes,
union acpi_operand_object **return_desc)
{
@@ -204,7 +204,7 @@ cleanup:
******************************************************************************/
acpi_status
-acpi_ut_evaluate_numeric_object(char *object_name,
+acpi_ut_evaluate_numeric_object(const char *object_name,
struct acpi_namespace_node *device_node,
u64 *value)
{
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 48fffcfe9911..dd3fd7f97f8e 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -80,6 +80,11 @@ const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS] = {
"_S4D"
};
+/* Hex-to-ascii */
+
+const char acpi_gbl_lower_hex_digits[] = "0123456789abcdef";
+const char acpi_gbl_upper_hex_digits[] = "0123456789ABCDEF";
+
/*******************************************************************************
*
* Namespace globals
@@ -221,6 +226,49 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
};
#endif /* !ACPI_REDUCED_HARDWARE */
+#if defined (ACPI_DISASSEMBLER) || defined (ACPI_ASL_COMPILER)
+
+/* to_pld macro: compile/disassemble strings */
+
+const char *acpi_gbl_pld_panel_list[] = {
+ "TOP",
+ "BOTTOM",
+ "LEFT",
+ "RIGHT",
+ "FRONT",
+ "BACK",
+ "UNKNOWN",
+ NULL
+};
+
+const char *acpi_gbl_pld_vertical_position_list[] = {
+ "UPPER",
+ "CENTER",
+ "LOWER",
+ NULL
+};
+
+const char *acpi_gbl_pld_horizontal_position_list[] = {
+ "LEFT",
+ "CENTER",
+ "RIGHT",
+ NULL
+};
+
+const char *acpi_gbl_pld_shape_list[] = {
+ "ROUND",
+ "OVAL",
+ "SQUARE",
+ "VERTICALRECTANGLE",
+ "HORIZONTALRECTANGLE",
+ "VERTICALTRAPEZOID",
+ "HORIZONTALTRAPEZOID",
+ "UNKNOWN",
+ "CHAMFERED",
+ NULL
+};
+#endif
+
/* Public globals */
ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
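The hex digit tables move from utprint.c into utglobal.c, presumably so that modules other than the printf implementation (the new exconcat.c, for instance) can share them. A minimal sketch of the lookup-table conversion they back; the helper name is illustrative, not the ACPICA one:

#include <stdio.h>
#include <stddef.h>

static const char lower_hex_digits[] = "0123456789abcdef";
static const char upper_hex_digits[] = "0123456789ABCDEF";

/* Build the hex string by repeated division, writing digits from the end
 * of the buffer backwards (the same approach acpi_ut_put_number takes). */
static const char *to_hex(unsigned long long value, int upper,
                          char *buf, size_t size)
{
        const char *digits = upper ? upper_hex_digits : lower_hex_digits;
        char *p = buf + size - 1;

        *p = '\0';
        do {
                *--p = digits[value % 16];
                value /= 16;
        } while (value && p > buf);

        return p;
}

int main(void)
{
        char buf[32];

        printf("%s\n", to_hex(48879, 1, buf, sizeof(buf)));     /* "BEEF" */
        printf("%s\n", to_hex(255, 0, buf, sizeof(buf)));       /* "ff" */
        return 0;
}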
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 6fb4ec365272..f7cd2d52643b 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -95,7 +95,7 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
hid =
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
- (acpi_size) length);
+ (acpi_size)length);
if (!hid) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -173,7 +173,7 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
uid =
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
- (acpi_size) length);
+ (acpi_size)length);
if (!uid) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -309,7 +309,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
/* Area for CID strings starts after the CID PNP_DEVICE_ID array */
next_id_string = ACPI_CAST_PTR(char, cid_list->ids) +
- ((acpi_size) count * sizeof(struct acpi_pnp_device_id));
+ ((acpi_size)count * sizeof(struct acpi_pnp_device_id));
/* Copy/convert the CIDs to the return buffer */
@@ -413,7 +413,7 @@ acpi_ut_execute_CLS(struct acpi_namespace_node *device_node,
cls =
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
- (acpi_size) length);
+ (acpi_size)length);
if (!cls) {
status = AE_NO_MEMORY;
goto cleanup;
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 667372093de1..2d6530ee7e51 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -236,8 +236,8 @@ acpi_ut_divide(u64 in_dividend,
}
remainder.full = remainder.full - dividend.full;
- remainder.part.hi = (u32) - ((s32) remainder.part.hi);
- remainder.part.lo = (u32) - ((s32) remainder.part.lo);
+ remainder.part.hi = (u32)-((s32)remainder.part.hi);
+ remainder.part.lo = (u32)-((s32)remainder.part.lo);
if (remainder.part.lo) {
remainder.part.hi--;
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index d938c27cc6cf..389de3bd1ff1 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -361,7 +361,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
void
acpi_ut_display_init_pathname(u8 type,
struct acpi_namespace_node *obj_handle,
- char *path)
+ const char *path)
{
acpi_status status;
struct acpi_buffer buffer;
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
index d5c3adf19bd0..3465fe2c5a5c 100644
--- a/drivers/acpi/acpica/utnonansi.c
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -205,37 +205,41 @@ acpi_ut_safe_strncat(char *dest,
*
* FUNCTION: acpi_ut_strtoul64
*
- * PARAMETERS: string - Null terminated string
- * base - Radix of the string: 16 or ACPI_ANY_BASE;
- * ACPI_ANY_BASE means 'in behalf of to_integer'
- * ret_integer - Where the converted integer is returned
+ * PARAMETERS: string - Null terminated string
+ * base - Radix of the string: 16 or 10 or
+ * ACPI_ANY_BASE
+ * max_integer_byte_width - Maximum allowable integer, in bytes:
+ * 4 or 8 (32 or 64 bits)
+ * ret_integer - Where the converted integer is
+ * returned
*
* RETURN: Status and Converted value
*
* DESCRIPTION: Convert a string into an unsigned value. Performs either a
- * 32-bit or 64-bit conversion, depending on the current mode
- * of the interpreter.
+ * 32-bit or 64-bit conversion, depending on the input integer
+ * size (often the current mode of the interpreter).
*
- * NOTES: acpi_gbl_integer_byte_width should be set to the proper width.
+ * NOTES: Negative numbers are not supported, as they are not supported
+ * by ACPI.
+ *
+ * acpi_gbl_integer_byte_width should be set to the proper width.
* For the core ACPICA code, this width depends on the DSDT
- * version. For iASL, the default byte width is always 8.
+ * version. For iASL, the default byte width is always 8 for the
+ * parser, but error checking is performed later to flag cases
+ * where a 64-bit constant is defined in a 32-bit DSDT/SSDT.
*
* Does not support Octal strings, not needed at this time.
*
- * There is an earlier version of the function after this one,
- * below. It is slightly different than this one, and the two
- * may eventually may need to be merged. (01/2016).
- *
******************************************************************************/
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
+acpi_status
+acpi_ut_strtoul64(char *string,
+ u32 base, u32 max_integer_byte_width, u64 *ret_integer)
{
u32 this_digit = 0;
u64 return_value = 0;
u64 quotient;
u64 dividend;
- u32 to_integer_op = (base == ACPI_ANY_BASE);
- u32 mode32 = (acpi_gbl_integer_byte_width == 4);
u8 valid_digits = 0;
u8 sign_of0x = 0;
u8 term = 0;
@@ -244,6 +248,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
switch (base) {
case ACPI_ANY_BASE:
+ case 10:
case 16:
break;
@@ -265,9 +270,9 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
string++;
}
- if (to_integer_op) {
+ if (base == ACPI_ANY_BASE) {
/*
- * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
+ * Base equal to ACPI_ANY_BASE means 'Either decimal or hex'.
* We need to determine if it is decimal or hexadecimal.
*/
if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) {
@@ -284,7 +289,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
/* Any string left? Check that '0x' is not followed by white space. */
if (!(*string) || isspace((int)*string) || *string == '\t') {
- if (to_integer_op) {
+ if (base == ACPI_ANY_BASE) {
goto error_exit;
} else {
goto all_done;
@@ -292,10 +297,11 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
}
/*
- * Perform a 32-bit or 64-bit conversion, depending upon the current
- * execution mode of the interpreter
+ * Perform a 32-bit or 64-bit conversion, depending upon the input
+ * byte width
*/
- dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
+ dividend = (max_integer_byte_width <= ACPI_MAX32_BYTE_WIDTH) ?
+ ACPI_UINT32_MAX : ACPI_UINT64_MAX;
/* Main loop: convert the string to a 32- or 64-bit integer */
@@ -323,7 +329,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
}
if (term) {
- if (to_integer_op) {
+ if (base == ACPI_ANY_BASE) {
goto error_exit;
} else {
break;
@@ -338,12 +344,13 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
valid_digits++;
- if (sign_of0x
- && ((valid_digits > 16)
- || ((valid_digits > 8) && mode32))) {
+ if (sign_of0x && ((valid_digits > 16) ||
+ ((valid_digits > 8)
+ && (max_integer_byte_width <=
+ ACPI_MAX32_BYTE_WIDTH)))) {
/*
* This is to_integer operation case.
- * No any restrictions for string-to-integer conversion,
+ * No restrictions for string-to-integer conversion,
* see ACPI spec.
*/
goto error_exit;
@@ -355,7 +362,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
&quotient, NULL);
if (return_value > quotient) {
- if (to_integer_op) {
+ if (base == ACPI_ANY_BASE) {
goto error_exit;
} else {
break;
@@ -378,7 +385,8 @@ all_done:
return_ACPI_STATUS(AE_OK);
error_exit:
- /* Base was set/validated above */
+
+ /* Base was set/validated above (10 or 16) */
if (base == 10) {
return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
@@ -388,8 +396,7 @@ error_exit:
}
#ifdef _OBSOLETE_FUNCTIONS
-/* TBD: use version in ACPICA main code base? */
-/* DONE: 01/2016 */
+/* Removed: 01/2016 */
/*******************************************************************************
*
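The visible change to acpi_ut_strtoul64() is that the caller now supplies the maximum integer width instead of the function reading acpi_gbl_integer_byte_width itself. A standalone sketch of that width handling; the real routine rejects or stops on overflow rather than saturating, this sketch saturates only to stay short, and ACPI_MAX32_BYTE_WIDTH is assumed to be 4:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX32_BYTE_WIDTH 4      /* mirrors ACPI_MAX32_BYTE_WIDTH */

/* Convert a hex string, then bound the result by the caller-supplied
 * integer width: 4 bytes for a 32-bit DSDT, 8 bytes otherwise. */
static uint64_t strtoul64_sketch(const char *s, unsigned int byte_width)
{
        uint64_t limit = (byte_width <= MAX32_BYTE_WIDTH) ?
                         UINT32_MAX : UINT64_MAX;
        uint64_t value = strtoull(s, NULL, 16);

        return (value > limit) ? limit : value;
}

int main(void)
{
        /* 64-bit callers keep the full value; 32-bit callers are bounded */
        printf("%llx\n", (unsigned long long)strtoul64_sketch("123456789A", 8));
        printf("%llx\n", (unsigned long long)strtoul64_sketch("123456789A", 4));
        return 0;
}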
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index edad3f043ab9..72b9a062bbab 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -51,11 +51,11 @@ ACPI_MODULE_NAME("utobject")
/* Local prototypes */
static acpi_status
acpi_ut_get_simple_object_size(union acpi_operand_object *obj,
- acpi_size * obj_length);
+ acpi_size *obj_length);
static acpi_status
acpi_ut_get_package_object_size(union acpi_operand_object *obj,
- acpi_size * obj_length);
+ acpi_size *obj_length);
static acpi_status
acpi_ut_get_element_length(u8 object_type,
@@ -177,7 +177,7 @@ union acpi_operand_object *acpi_ut_create_package_object(u32 count)
* Create the element array. Count+1 allows the array to be null
* terminated.
*/
- package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count +
+ package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size)count +
1) * sizeof(void *));
if (!package_elements) {
ACPI_FREE(package_desc);
@@ -454,7 +454,7 @@ void acpi_ut_delete_object_desc(union acpi_operand_object *object)
static acpi_status
acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
- acpi_size * obj_length)
+ acpi_size *obj_length)
{
acpi_size length;
acpi_size size;
@@ -495,12 +495,12 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
switch (internal_object->common.type) {
case ACPI_TYPE_STRING:
- length += (acpi_size) internal_object->string.length + 1;
+ length += (acpi_size)internal_object->string.length + 1;
break;
case ACPI_TYPE_BUFFER:
- length += (acpi_size) internal_object->buffer.length;
+ length += (acpi_size)internal_object->buffer.length;
break;
case ACPI_TYPE_INTEGER:
@@ -640,7 +640,7 @@ acpi_ut_get_element_length(u8 object_type,
static acpi_status
acpi_ut_get_package_object_size(union acpi_operand_object *internal_object,
- acpi_size * obj_length)
+ acpi_size *obj_length)
{
acpi_status status;
struct acpi_pkg_info info;
@@ -665,7 +665,7 @@ acpi_ut_get_package_object_size(union acpi_operand_object *internal_object,
*/
info.length +=
ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)) *
- (acpi_size) info.num_packages;
+ (acpi_size)info.num_packages;
/* Return the total package length */
@@ -689,7 +689,7 @@ acpi_ut_get_package_object_size(union acpi_operand_object *internal_object,
acpi_status
acpi_ut_get_object_size(union acpi_operand_object *internal_object,
- acpi_size * obj_length)
+ acpi_size *obj_length)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index b5cfe577fabf..3f5fed670271 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -150,7 +150,7 @@ acpi_status acpi_ut_initialize_interfaces(void)
i < (ACPI_ARRAY_LENGTH(acpi_default_supported_interfaces) - 1);
i++) {
acpi_default_supported_interfaces[i].next =
- &acpi_default_supported_interfaces[(acpi_size) i + 1];
+ &acpi_default_supported_interfaces[(acpi_size)i + 1];
}
acpi_os_release_mutex(acpi_gbl_osi_mutex);
@@ -397,7 +397,7 @@ struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name)
*
******************************************************************************/
-acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state)
+acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
{
union acpi_operand_object *string_desc;
union acpi_operand_object *return_desc;
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index 813520ab8ca4..3cd573c5f7f9 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -61,7 +61,7 @@ ACPI_MODULE_NAME("utownerid")
* when the method exits or the table is unloaded.
*
******************************************************************************/
-acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
+acpi_status acpi_ut_allocate_owner_id(acpi_owner_id *owner_id)
{
u32 i;
u32 j;
@@ -122,7 +122,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
* permanently allocated (prevents +1 overflow)
*/
*owner_id =
- (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j));
+ (acpi_owner_id)((k + 1) + ACPI_MUL_32(j));
ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
"Allocated OwnerId: %2.2X\n",
@@ -167,7 +167,7 @@ exit:
*
******************************************************************************/
-void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
+void acpi_ut_release_owner_id(acpi_owner_id *owner_id_ptr)
{
acpi_owner_id owner_id = *owner_id_ptr;
acpi_status status;
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 8c218ad787cd..dd084cf52502 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -67,11 +67,6 @@ static char *acpi_ut_format_number(char *string,
static char *acpi_ut_put_number(char *string, u64 number, u8 base, u8 upper);
-/* Module globals */
-
-static const char acpi_gbl_lower_hex_digits[] = "0123456789abcdef";
-static const char acpi_gbl_upper_hex_digits[] = "0123456789ABCDEF";
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_bound_string_length
@@ -269,9 +264,9 @@ static char *acpi_ut_format_number(char *string,
sign = '\0';
if (type & ACPI_FORMAT_SIGN) {
- if ((s64) number < 0) {
+ if ((s64)number < 0) {
sign = '-';
- number = -(s64) number;
+ number = -(s64)number;
width--;
} else if (type & ACPI_FORMAT_SIGN_PLUS) {
sign = '+';
@@ -409,7 +404,7 @@ acpi_ut_vsnprintf(char *string,
width = -1;
if (isdigit((int)*format)) {
format = acpi_ut_scan_number(format, &number);
- width = (s32) number;
+ width = (s32)number;
} else if (*format == '*') {
++format;
width = va_arg(args, int);
@@ -426,7 +421,7 @@ acpi_ut_vsnprintf(char *string,
++format;
if (isdigit((int)*format)) {
format = acpi_ut_scan_number(format, &number);
- precision = (s32) number;
+ precision = (s32)number;
} else if (*format == '*') {
++format;
precision = va_arg(args, int);
@@ -555,17 +550,17 @@ acpi_ut_vsnprintf(char *string,
if (qualifier == 'L') {
number = va_arg(args, u64);
if (type & ACPI_FORMAT_SIGN) {
- number = (s64) number;
+ number = (s64)number;
}
} else if (qualifier == 'l') {
number = va_arg(args, unsigned long);
if (type & ACPI_FORMAT_SIGN) {
- number = (s32) number;
+ number = (s32)number;
}
} else if (qualifier == 'h') {
number = (u16)va_arg(args, int);
if (type & ACPI_FORMAT_SIGN) {
- number = (s16) number;
+ number = (s16)number;
}
} else {
number = va_arg(args, unsigned int);
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 0b005728db4e..288913a0e709 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -130,7 +130,7 @@ void acpi_ut_print_string(char *string, u16 max_length)
} else {
/* All others will be Hex escapes */
- acpi_os_printf("\\x%2.2X", (s32) string[i]);
+ acpi_os_printf("\\x%2.2X", (s32)string[i]);
}
break;
}
@@ -145,73 +145,6 @@ void acpi_ut_print_string(char *string, u16 max_length)
/*******************************************************************************
*
- * FUNCTION: acpi_ut_valid_acpi_char
- *
- * PARAMETERS: char - The character to be examined
- * position - Byte position (0-3)
- *
- * RETURN: TRUE if the character is valid, FALSE otherwise
- *
- * DESCRIPTION: Check for a valid ACPI character. Must be one of:
- * 1) Upper case alpha
- * 2) numeric
- * 3) underscore
- *
- * We allow a '!' as the last character because of the ASF! table
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_acpi_char(char character, u32 position)
-{
-
- if (!((character >= 'A' && character <= 'Z') ||
- (character >= '0' && character <= '9') || (character == '_'))) {
-
- /* Allow a '!' in the last position */
-
- if (character == '!' && position == 3) {
- return (TRUE);
- }
-
- return (FALSE);
- }
-
- return (TRUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_valid_acpi_name
- *
- * PARAMETERS: name - The name to be examined. Does not have to
- * be NULL terminated string.
- *
- * RETURN: TRUE if the name is valid, FALSE otherwise
- *
- * DESCRIPTION: Check for a valid ACPI name. Each character must be one of:
- * 1) Upper case alpha
- * 2) numeric
- * 3) underscore
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_acpi_name(char *name)
-{
- u32 i;
-
- ACPI_FUNCTION_ENTRY();
-
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
- if (!acpi_ut_valid_acpi_char(name[i], i)) {
- return (FALSE);
- }
- }
-
- return (TRUE);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ut_repair_name
*
* PARAMETERS: name - The ACPI name to be repaired
@@ -253,7 +186,7 @@ void acpi_ut_repair_name(char *name)
/* Check each character in the name */
for (i = 0; i < ACPI_NAME_SIZE; i++) {
- if (acpi_ut_valid_acpi_char(name[i], i)) {
+ if (acpi_ut_valid_name_char(name[i], i)) {
continue;
}
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 60c406a8efcb..0df07dfa53b6 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -90,7 +90,7 @@ acpi_ut_remove_allocation(struct acpi_debug_mem_block *address,
******************************************************************************/
acpi_status
-acpi_ut_create_list(char *list_name,
+acpi_ut_create_list(const char *list_name,
u16 object_size, struct acpi_memory_list **return_cache)
{
struct acpi_memory_list *cache;
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 68d4673f62e6..d9e6aac7dc83 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -127,7 +127,7 @@ ACPI_EXPORT_SYMBOL(acpi_subsystem_status)
* and the value of out_buffer is undefined.
*
******************************************************************************/
-acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
+acpi_status acpi_get_system_info(struct acpi_buffer *out_buffer)
{
struct acpi_system_info *info_ptr;
acpi_status status;
@@ -483,7 +483,7 @@ ACPI_EXPORT_SYMBOL(acpi_check_address_range)
******************************************************************************/
acpi_status
acpi_decode_pld_buffer(u8 *in_buffer,
- acpi_size length, struct acpi_pld_info ** return_buffer)
+ acpi_size length, struct acpi_pld_info **return_buffer)
{
struct acpi_pld_info *pld_info;
u32 *buffer = ACPI_CAST_PTR(u32, in_buffer);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 96809cd99ace..bdc67bad61a7 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -3,7 +3,7 @@
*
* Check to see if the given machine has a known bad ACPI BIOS
* or if the BIOS is too old.
- * Check given machine against acpi_osi_dmi_table[].
+ * Check given machine against acpi_rev_dmi_table[].
*
* Copyright (C) 2004 Len Brown <len.brown@intel.com>
* Copyright (C) 2002 Andy Grover <andrew.grover@intel.com>
@@ -47,7 +47,7 @@ struct acpi_blacklist_item {
u32 is_critical_error;
};
-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
+static struct dmi_system_id acpi_rev_dmi_table[] __initdata;
/*
* POLICY: If *anything* doesn't work, put it on the blacklist.
@@ -128,36 +128,12 @@ int __init acpi_blacklisted(void)
}
}
- dmi_check_system(acpi_osi_dmi_table);
+ (void)early_acpi_osi_init();
+ dmi_check_system(acpi_rev_dmi_table);
return blacklisted;
}
#ifdef CONFIG_DMI
-static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
-{
- acpi_dmi_osi_linux(1, d); /* enable */
- return 0;
-}
-static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
-{
- printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
- acpi_osi_setup("!Windows 2006");
- acpi_osi_setup("!Windows 2006 SP1");
- acpi_osi_setup("!Windows 2006 SP2");
- return 0;
-}
-static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
-{
- printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
- acpi_osi_setup("!Windows 2009");
- return 0;
-}
-static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
-{
- printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
- acpi_osi_setup("!Windows 2012");
- return 0;
-}
#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
{
@@ -168,169 +144,7 @@ static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
}
#endif
-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
- {
- .callback = dmi_disable_osi_vista,
- .ident = "Fujitsu Siemens",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
- },
- },
- {
- /*
- * There have a NVIF method in MSI GX723 DSDT need call by Nvidia
- * driver (e.g. nouveau) when user press brightness hotkey.
- * Currently, nouveau driver didn't do the job and it causes there
- * have a infinite while loop in DSDT when user press hotkey.
- * We add MSI GX723's dmi information to this table for workaround
- * this issue.
- * Will remove MSI GX723 from the table after nouveau grows support.
- */
- .callback = dmi_disable_osi_vista,
- .ident = "MSI GX723",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
- DMI_MATCH(DMI_PRODUCT_NAME, "GX723"),
- },
- },
- {
- .callback = dmi_disable_osi_vista,
- .ident = "Sony VGN-NS10J_S",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS10J_S"),
- },
- },
- {
- .callback = dmi_disable_osi_vista,
- .ident = "Sony VGN-SR290J",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR290J"),
- },
- },
- {
- .callback = dmi_disable_osi_vista,
- .ident = "VGN-NS50B_L",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS50B_L"),
- },
- },
- {
- .callback = dmi_disable_osi_vista,
- .ident = "VGN-SR19XN",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR19XN"),
- },
- },
- {
- .callback = dmi_disable_osi_vista,
- .ident = "Toshiba Satellite L355",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
- },
- },
- {
- .callback = dmi_disable_osi_win7,
- .ident = "ASUS K50IJ",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
- },
- },
- {
- .callback = dmi_disable_osi_vista,
- .ident = "Toshiba P305D",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
- },
- },
- {
- .callback = dmi_disable_osi_vista,
- .ident = "Toshiba NB100",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "NB100"),
- },
- },
-
- /*
- * The wireless hotkey does not work on those machines when
- * returning true for _OSI("Windows 2012")
- */
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Dell Inspiron 7737",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Dell Inspiron 7537",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Dell Inspiron 5437",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5437"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Dell Inspiron 3437",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3437"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Dell Vostro 3446",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Dell Vostro 3546",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3546"),
- },
- },
-
- /*
- * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
- * Linux ignores it, except for the machines enumerated below.
- */
-
- /*
- * Without this this EEEpc exports a non working WMI interface, with
- * this it exports a working "good old" eeepc_laptop interface, fixing
- * both brightness control, and rfkill not working.
- */
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Asus EEE PC 1015PX",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
- },
- },
-
+static struct dmi_system_id acpi_rev_dmi_table[] __initdata = {
#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
/*
* DELL XPS 13 (2015) switches sound between HDA and I2S
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index c068c829b453..31e8da648fff 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -925,11 +925,13 @@ void __init acpi_early_init(void)
goto error0;
}
- status = acpi_load_tables();
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to load the System Description Tables\n");
- goto error0;
+ if (acpi_gbl_group_module_level_code) {
+ status = acpi_load_tables();
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Unable to load the System Description Tables\n");
+ goto error0;
+ }
}
#ifdef CONFIG_X86
@@ -995,17 +997,10 @@ static int __init acpi_bus_init(void)
acpi_os_initialize1();
- status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to start the ACPI Interpreter\n");
- goto error1;
- }
-
/*
* ACPI 2.0 requires the EC driver to be loaded and work before
- * the EC device is found in the namespace (i.e. before acpi_initialize_objects()
- * is called).
+ * the EC device is found in the namespace (i.e. before
+ * acpi_load_tables() is called).
*
* This is accomplished by looking for the ECDT table, and getting
* the EC parameters out of that.
@@ -1013,6 +1008,22 @@ static int __init acpi_bus_init(void)
status = acpi_ec_ecdt_probe();
/* Ignore result. Not having an ECDT is not fatal. */
+ if (!acpi_gbl_group_module_level_code) {
+ status = acpi_load_tables();
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Unable to load the System Description Tables\n");
+ goto error1;
+ }
+ }
+
+ status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Unable to start the ACPI Interpreter\n");
+ goto error1;
+ }
+
status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
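The net effect of the bus.c hunks is a reordering: when module-level code is not executed as a group, table loading moves from acpi_early_init() to acpi_bus_init(), after the ECDT probe and before the interpreter is enabled. A stub sketch of the resulting call order; the function bodies are placeholders, only the ordering mirrors the patch:

#include <stdio.h>

static int group_module_level_code = 1; /* mirrors acpi_gbl_group_module_level_code */

static void load_tables(void)        { puts("acpi_load_tables"); }
static void ecdt_probe(void)         { puts("acpi_ec_ecdt_probe"); }
static void enable_subsystem(void)   { puts("acpi_enable_subsystem"); }
static void initialize_objects(void) { puts("acpi_initialize_objects"); }

int main(void)
{
        /* acpi_early_init(): tables are loaded early only when module-level
         * code is executed as one group, as before this patch. */
        if (group_module_level_code)
                load_tables();

        /* acpi_bus_init(): the EC is probed from the ECDT first, then the
         * tables (if not already loaded) and finally the interpreter. */
        ecdt_probe();
        if (!group_module_level_code)
                load_tables();
        enable_subsystem();
        initialize_objects();
        return 0;
}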
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index b9afb47db7ed..7b2c48fde4e2 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -35,7 +35,7 @@ static ssize_t acpi_object_path(acpi_handle handle, char *buf)
if (result)
return result;
- result = sprintf(buf, "%s\n", (char*)path.pointer);
+ result = sprintf(buf, "%s\n", (char *)path.pointer);
kfree(path.pointer);
return result;
}
@@ -333,7 +333,8 @@ int acpi_device_modalias(struct device *dev, char *buf, int size)
EXPORT_SYMBOL_GPL(acpi_device_modalias);
static ssize_t
-acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
+acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
@@ -397,7 +398,8 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
static ssize_t
-acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
+acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
@@ -467,12 +469,27 @@ acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
if (ACPI_FAILURE(status))
- return -ENODEV;
+ return -EIO;
return sprintf(buf, "%llu\n", sun);
}
static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
+static ssize_t
+acpi_device_hrv_show(struct device *dev, struct device_attribute *attr,
+ char *buf) {
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ acpi_status status;
+ unsigned long long hrv;
+
+ status = acpi_evaluate_integer(acpi_dev->handle, "_HRV", NULL, &hrv);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return sprintf(buf, "%llu\n", hrv);
+}
+static DEVICE_ATTR(hrv, 0444, acpi_device_hrv_show, NULL);
+
static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
@@ -481,7 +498,7 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status))
- return -ENODEV;
+ return -EIO;
return sprintf(buf, "%llu\n", sta);
}
@@ -541,16 +558,22 @@ int acpi_device_setup_files(struct acpi_device *dev)
goto end;
}
+ if (acpi_has_method(dev->handle, "_HRV")) {
+ result = device_create_file(&dev->dev, &dev_attr_hrv);
+ if (result)
+ goto end;
+ }
+
if (acpi_has_method(dev->handle, "_STA")) {
result = device_create_file(&dev->dev, &dev_attr_status);
if (result)
goto end;
}
- /*
- * If device has _EJ0, 'eject' file is created that is used to trigger
- * hot-removal function from userland.
- */
+ /*
+ * If device has _EJ0, 'eject' file is created that is used to trigger
+ * hot-removal function from userland.
+ */
if (acpi_has_method(dev->handle, "_EJ0")) {
result = device_create_file(&dev->dev, &dev_attr_eject);
if (result)
@@ -604,6 +627,9 @@ void acpi_device_remove_files(struct acpi_device *dev)
if (acpi_has_method(dev->handle, "_SUN"))
device_remove_file(&dev->dev, &dev_attr_sun);
+ if (acpi_has_method(dev->handle, "_HRV"))
+ device_remove_file(&dev->dev, &dev_attr_hrv);
+
if (dev->pnp.unique_id)
device_remove_file(&dev->dev, &dev_attr_uid);
if (dev->pnp.type.bus_address)
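Once registered, the new attribute appears as a plain file under the ACPI device's sysfs directory, alongside sun and status. A small userspace read, where the device directory name is a hypothetical example:

#include <stdio.h>

int main(void)
{
        /* Hypothetical device name; the real path depends on the platform */
        FILE *f = fopen("/sys/bus/acpi/devices/PNP0C0A:00/hrv", "r");
        unsigned long long hrv;

        if (!f) {
                perror("hrv");
                return 1;
        }
        if (fscanf(f, "%llu", &hrv) == 1)
                printf("hardware revision: %llu\n", hrv);
        fclose(f);
        return 0;
}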
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index b420fb46669d..0e70181f150c 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -105,8 +105,8 @@ enum ec_command {
enum {
EC_FLAGS_QUERY_PENDING, /* Query is pending */
EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */
- EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and
- * OpReg are installed */
+ EC_FLAGS_GPE_HANDLER_INSTALLED, /* GPE handler installed */
+ EC_FLAGS_EC_HANDLER_INSTALLED, /* OpReg handler installed */
EC_FLAGS_STARTED, /* Driver is started */
EC_FLAGS_STOPPED, /* Driver is stopped */
EC_FLAGS_COMMAND_STORM, /* GPE storms occurred to the
@@ -175,10 +175,9 @@ static void acpi_ec_event_processor(struct work_struct *work);
struct acpi_ec *boot_ec, *first_ec;
EXPORT_SYMBOL(first_ec);
-static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
-static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
+static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
/* --------------------------------------------------------------------------
* Logging/Debugging
@@ -367,7 +366,8 @@ static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
static void acpi_ec_submit_request(struct acpi_ec *ec)
{
ec->reference_count++;
- if (ec->reference_count == 1)
+ if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
+ ec->reference_count == 1)
acpi_ec_enable_gpe(ec, true);
}
@@ -376,7 +376,8 @@ static void acpi_ec_complete_request(struct acpi_ec *ec)
bool flushed = false;
ec->reference_count--;
- if (ec->reference_count == 0)
+ if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
+ ec->reference_count == 0)
acpi_ec_disable_gpe(ec, true);
flushed = acpi_ec_flushed(ec);
if (flushed)
@@ -1287,52 +1288,64 @@ static int ec_install_handlers(struct acpi_ec *ec)
{
acpi_status status;
- if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
- return 0;
- status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
- ACPI_GPE_EDGE_TRIGGERED,
- &acpi_ec_gpe_handler, ec);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
acpi_ec_start(ec, false);
- status = acpi_install_address_space_handler(ec->handle,
- ACPI_ADR_SPACE_EC,
- &acpi_ec_space_handler,
- NULL, ec);
- if (ACPI_FAILURE(status)) {
- if (status == AE_NOT_FOUND) {
- /*
- * Maybe OS fails in evaluating the _REG object.
- * The AE_NOT_FOUND error will be ignored and OS
- * continue to initialize EC.
- */
- pr_err("Fail in evaluating the _REG object"
- " of EC device. Broken bios is suspected.\n");
- } else {
- acpi_ec_stop(ec, false);
- acpi_remove_gpe_handler(NULL, ec->gpe,
- &acpi_ec_gpe_handler);
- return -ENODEV;
+
+ if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+ status = acpi_install_address_space_handler(ec->handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_ec_space_handler,
+ NULL, ec);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_NOT_FOUND) {
+ /*
+ * Maybe OS fails in evaluating the _REG
+ * object. The AE_NOT_FOUND error will be
+ * ignored and OS * continue to initialize
+ * EC.
+ */
+ pr_err("Fail in evaluating the _REG object"
+ " of EC device. Broken bios is suspected.\n");
+ } else {
+ acpi_ec_stop(ec, false);
+ return -ENODEV;
+ }
+ }
+ set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
+ }
+
+ if (!test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
+ status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
+ ACPI_GPE_EDGE_TRIGGERED,
+ &acpi_ec_gpe_handler, ec);
+ /* This is not fatal as we can poll EC events */
+ if (ACPI_SUCCESS(status)) {
+ set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
+ if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_ec_enable_gpe(ec, true);
}
}
- set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
return 0;
}
static void ec_remove_handlers(struct acpi_ec *ec)
{
- if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
- return;
acpi_ec_stop(ec, false);
- if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
- ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
- pr_err("failed to remove space handler\n");
- if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
- &acpi_ec_gpe_handler)))
- pr_err("failed to remove gpe handler\n");
- clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
+
+ if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+ if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
+ ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
+ pr_err("failed to remove space handler\n");
+ clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
+ }
+
+ if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
+ if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
+ &acpi_ec_gpe_handler)))
+ pr_err("failed to remove gpe handler\n");
+ clear_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
+ }
}
static int acpi_ec_add(struct acpi_device *device)
@@ -1344,11 +1357,12 @@ static int acpi_ec_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_EC_CLASS);
/* Check for boot EC */
- if (boot_ec &&
- (boot_ec->handle == device->handle ||
- boot_ec->handle == ACPI_ROOT_OBJECT)) {
+ if (boot_ec) {
ec = boot_ec;
boot_ec = NULL;
+ ec_remove_handlers(ec);
+ if (first_ec == ec)
+ first_ec = NULL;
} else {
ec = make_acpi_ec();
if (!ec)
@@ -1434,7 +1448,7 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
int __init acpi_boot_ec_enable(void)
{
- if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
+ if (!boot_ec)
return 0;
if (!ec_install_handlers(boot_ec)) {
first_ec = boot_ec;
@@ -1448,20 +1462,6 @@ static const struct acpi_device_id ec_device_ids[] = {
{"", 0},
};
-/* Some BIOS do not survive early DSDT scan, skip it */
-static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
-{
- EC_FLAGS_SKIP_DSDT_SCAN = 1;
- return 0;
-}
-
-/* ASUStek often supplies us with broken ECDT, validate it */
-static int ec_validate_ecdt(const struct dmi_system_id *id)
-{
- EC_FLAGS_VALIDATE_ECDT = 1;
- return 0;
-}
-
#if 0
/*
* Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not
@@ -1503,30 +1503,29 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
return 0;
}
+static int ec_correct_ecdt(const struct dmi_system_id *id)
+{
+ pr_debug("Detected system needing ECDT address correction.\n");
+ EC_FLAGS_CORRECT_ECDT = 1;
+ return 0;
+}
+
static struct dmi_system_id ec_dmi_table[] __initdata = {
{
- ec_skip_dsdt_scan, "Compal JFL92", {
- DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
- DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
+ ec_correct_ecdt, "Asus L4R", {
+ DMI_MATCH(DMI_BIOS_VERSION, "1008.006"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),
+ DMI_MATCH(DMI_BOARD_NAME, "L4R") }, NULL},
{
- ec_validate_ecdt, "MSI MS-171F", {
+ ec_correct_ecdt, "Asus M6R", {
+ DMI_MATCH(DMI_BIOS_VERSION, "0207"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M6R"),
+ DMI_MATCH(DMI_BOARD_NAME, "M6R") }, NULL},
+ {
+ ec_correct_ecdt, "MSI MS-171F", {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
{
- ec_validate_ecdt, "ASUS hardware", {
- DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
- {
- ec_validate_ecdt, "ASUS hardware", {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
- {
- ec_skip_dsdt_scan, "HP Folio 13", {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
- {
- ec_validate_ecdt, "ASUS hardware", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
- {
ec_clear_on_resume, "Samsung hardware", {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
{},
@@ -1534,8 +1533,8 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
int __init acpi_ec_ecdt_probe(void)
{
+ int ret = 0;
acpi_status status;
- struct acpi_ec *saved_ec = NULL;
struct acpi_table_ecdt *ecdt_ptr;
boot_ec = make_acpi_ec();
@@ -1547,67 +1546,45 @@ int __init acpi_ec_ecdt_probe(void)
dmi_check_system(ec_dmi_table);
status = acpi_get_table(ACPI_SIG_ECDT, 1,
(struct acpi_table_header **)&ecdt_ptr);
- if (ACPI_SUCCESS(status)) {
- pr_info("EC description table is found, configuring boot EC\n");
- boot_ec->command_addr = ecdt_ptr->control.address;
- boot_ec->data_addr = ecdt_ptr->data.address;
- boot_ec->gpe = ecdt_ptr->gpe;
- boot_ec->handle = ACPI_ROOT_OBJECT;
- acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id,
- &boot_ec->handle);
- /* Don't trust ECDT, which comes from ASUSTek */
- if (!EC_FLAGS_VALIDATE_ECDT)
- goto install;
- saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
- if (!saved_ec)
- return -ENOMEM;
- /* fall through */
+ if (ACPI_FAILURE(status)) {
+ ret = -ENODEV;
+ goto error;
}
- if (EC_FLAGS_SKIP_DSDT_SCAN) {
- kfree(saved_ec);
- return -ENODEV;
+ if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
+ /*
+ * Asus X50GL:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=11880
+ */
+ ret = -ENODEV;
+ goto error;
}
- /* This workaround is needed only on some broken machines,
- * which require early EC, but fail to provide ECDT */
- pr_debug("Look up EC in DSDT\n");
- status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
- boot_ec, NULL);
- /* Check that acpi_get_devices actually find something */
- if (ACPI_FAILURE(status) || !boot_ec->handle)
- goto error;
- if (saved_ec) {
- /* try to find good ECDT from ASUSTek */
- if (saved_ec->command_addr != boot_ec->command_addr ||
- saved_ec->data_addr != boot_ec->data_addr ||
- saved_ec->gpe != boot_ec->gpe ||
- saved_ec->handle != boot_ec->handle)
- pr_info("ASUSTek keeps feeding us with broken "
- "ECDT tables, which are very hard to workaround. "
- "Trying to use DSDT EC info instead. Please send "
- "output of acpidump to linux-acpi@vger.kernel.org\n");
- kfree(saved_ec);
- saved_ec = NULL;
+ pr_info("EC description table is found, configuring boot EC\n");
+ if (EC_FLAGS_CORRECT_ECDT) {
+ /*
+ * Asus L4R, Asus M6R
+ * https://bugzilla.kernel.org/show_bug.cgi?id=9399
+ * MSI MS-171F
+ * https://bugzilla.kernel.org/show_bug.cgi?id=12461
+ */
+ boot_ec->command_addr = ecdt_ptr->data.address;
+ boot_ec->data_addr = ecdt_ptr->control.address;
} else {
- /* We really need to limit this workaround, the only ASUS,
- * which needs it, has fake EC._INI method, so use it as flag.
- * Keep boot_ec struct as it will be needed soon.
- */
- if (!dmi_name_in_vendors("ASUS") ||
- !acpi_has_method(boot_ec->handle, "_INI"))
- return -ENODEV;
+ boot_ec->command_addr = ecdt_ptr->control.address;
+ boot_ec->data_addr = ecdt_ptr->data.address;
}
-install:
- if (!ec_install_handlers(boot_ec)) {
+ boot_ec->gpe = ecdt_ptr->gpe;
+ boot_ec->handle = ACPI_ROOT_OBJECT;
+ ret = ec_install_handlers(boot_ec);
+ if (!ret)
first_ec = boot_ec;
- return 0;
- }
error:
- kfree(boot_ec);
- kfree(saved_ec);
- boot_ec = NULL;
- return -ENODEV;
+ if (ret) {
+ kfree(boot_ec);
+ boot_ec = NULL;
+ }
+ return ret;
}
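
On the DMI-matched machines above, the firmware's ECDT swaps the EC command/status and data register addresses, so the probe swaps them back before installing the handlers. A minimal standalone sketch of just that correction follows; the struct and function names here are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the two ECDT register addresses. */
struct ecdt_regs {
	uint64_t control;	/* EC command/status register address */
	uint64_t data;		/* EC data register address */
};

/* Pick the EC addresses, undoing the swap on quirked firmware. */
static void apply_ecdt_quirk(const struct ecdt_regs *ecdt, bool correct_ecdt,
			     uint64_t *command_addr, uint64_t *data_addr)
{
	if (correct_ecdt) {
		/* Quirked ECDT: the two addresses are interchanged. */
		*command_addr = ecdt->data;
		*data_addr = ecdt->control;
	} else {
		*command_addr = ecdt->control;
		*data_addr = ecdt->data;
	}
}
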
static int param_set_event_clearing(const char *val, struct kernel_param *kp)
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
new file mode 100644
index 000000000000..46f060356a22
--- /dev/null
+++ b/drivers/acpi/evged.c
@@ -0,0 +1,154 @@
+/*
+ * Generic Event Device for ACPI.
+ *
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * The Generic Event Device allows platforms to handle interrupts in ACPI
+ * ASL statements. It is very similar to the _EVT method approach used for
+ * GPIO events. All interrupts are listed in _CRS and the handler is
+ * written in the _EVT method. Here is an example:
+ *
+ * Device (GED0)
+ * {
+ *
+ *     Name (_HID, "ACPI0013")
+ *     Name (_UID, 0)
+ *     Method (_CRS, 0x0, Serialized)
+ *     {
+ *         Name (RBUF, ResourceTemplate ()
+ *         {
+ *             Interrupt(ResourceConsumer, Edge, ActiveHigh, Shared, , , )
+ *             {123}
+ *         })
+ *         Return (RBUF)
+ *     }
+ *
+ *     Method (_EVT, 1) {
+ *         if (Lequal(123, Arg0))
+ *         {
+ *         }
+ *     }
+ * }
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+
+#define MODULE_NAME "acpi-ged"
+
+struct acpi_ged_event {
+ struct list_head node;
+ struct device *dev;
+ unsigned int gsi;
+ unsigned int irq;
+ acpi_handle handle;
+};
+
+static irqreturn_t acpi_ged_irq_handler(int irq, void *data)
+{
+ struct acpi_ged_event *event = data;
+ acpi_status acpi_ret;
+
+ acpi_ret = acpi_execute_simple_method(event->handle, NULL, event->gsi);
+ if (ACPI_FAILURE(acpi_ret))
+ dev_err_once(event->dev, "IRQ method execution failed\n");
+
+ return IRQ_HANDLED;
+}
+
+static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
+ void *context)
+{
+ struct acpi_ged_event *event;
+ unsigned int irq;
+ unsigned int gsi;
+ unsigned int irqflags = IRQF_ONESHOT;
+ struct device *dev = context;
+ acpi_handle handle = ACPI_HANDLE(dev);
+ acpi_handle evt_handle;
+ struct resource r;
+ struct acpi_resource_irq *p = &ares->data.irq;
+ struct acpi_resource_extended_irq *pext = &ares->data.extended_irq;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_END_TAG)
+ return AE_OK;
+
+ if (!acpi_dev_resource_interrupt(ares, 0, &r)) {
+ dev_err(dev, "unable to parse IRQ resource\n");
+ return AE_ERROR;
+ }
+ if (ares->type == ACPI_RESOURCE_TYPE_IRQ)
+ gsi = p->interrupts[0];
+ else
+ gsi = pext->interrupts[0];
+
+ irq = r.start;
+
+ if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) {
+ dev_err(dev, "cannot locate _EVT method\n");
+ return AE_ERROR;
+ }
+
+ dev_info(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
+
+ event = devm_kzalloc(dev, sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return AE_ERROR;
+
+ event->gsi = gsi;
+ event->dev = dev;
+ event->irq = irq;
+ event->handle = evt_handle;
+
+ if (r.flags & IORESOURCE_IRQ_SHAREABLE)
+ irqflags |= IRQF_SHARED;
+
+ if (devm_request_threaded_irq(dev, irq, NULL, acpi_ged_irq_handler,
+ irqflags, "ACPI:Ged", event)) {
+ dev_err(dev, "failed to setup event handler for irq %u\n", irq);
+ return AE_ERROR;
+ }
+
+ return AE_OK;
+}
+
+static int ged_probe(struct platform_device *pdev)
+{
+ acpi_status acpi_ret;
+
+ acpi_ret = acpi_walk_resources(ACPI_HANDLE(&pdev->dev), "_CRS",
+ acpi_ged_request_interrupt, &pdev->dev);
+ if (ACPI_FAILURE(acpi_ret)) {
+ dev_err(&pdev->dev, "unable to parse the _CRS record\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct acpi_device_id ged_acpi_ids[] = {
+ {"ACPI0013"},
+ {},
+};
+
+static struct platform_driver ged_driver = {
+ .probe = ged_probe,
+ .driver = {
+ .name = MODULE_NAME,
+ .acpi_match_table = ACPI_PTR(ged_acpi_ids),
+ },
+};
+builtin_platform_driver(ged_driver);
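
The core of the driver above is the hand-off from a Linux interrupt to the ASL _EVT handler: the _EVT handle is looked up once at probe time and the GSI number is later passed as the method's only argument (Arg0). A minimal sketch of that hand-off, assuming a kernel/ACPICA build; notify_evt() is a hypothetical helper, not part of the driver.

#include <linux/acpi.h>

/* Hypothetical helper: evaluate _EVT(gsi) below the given GED device. */
static int notify_evt(struct acpi_device *adev, unsigned int gsi)
{
	acpi_handle evt;

	/* Locate the _EVT control method once. */
	if (ACPI_FAILURE(acpi_get_handle(adev->handle, "_EVT", &evt)))
		return -ENODEV;

	/* Equivalent to evaluating _EVT(gsi) in ASL; Arg0 receives the GSI. */
	if (ACPI_FAILURE(acpi_execute_simple_method(evt, NULL, gsi)))
		return -EIO;

	return 0;
}
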
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7c188472d9c2..9bb0773d39bf 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -20,7 +20,8 @@
#define PREFIX "ACPI: "
-void acpi_initrd_initialize_tables(void);
+int early_acpi_osi_init(void);
+int acpi_osi_init(void);
acpi_status acpi_os_initialize1(void);
void init_acpi_device_notify(void);
int acpi_scan_init(void);
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 72b6e9ef0ae9..d176e0ece470 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -327,10 +327,18 @@ int __init acpi_numa_init(void)
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
- acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
- acpi_parse_x2apic_affinity, 0);
- acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity, 0);
+ struct acpi_subtable_proc srat_proc[2];
+
+ memset(srat_proc, 0, sizeof(srat_proc));
+ srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
+ srat_proc[0].handler = acpi_parse_processor_affinity;
+ srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY;
+ srat_proc[1].handler = acpi_parse_x2apic_affinity;
+
+ acpi_table_parse_entries_array(ACPI_SIG_SRAT,
+ sizeof(struct acpi_table_srat),
+ srat_proc, ARRAY_SIZE(srat_proc), 0);
+
cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
acpi_parse_memory_affinity,
NR_NODE_MEMBLKS);
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
new file mode 100644
index 000000000000..849f9d2245ca
--- /dev/null
+++ b/drivers/acpi/osi.c
@@ -0,0 +1,522 @@
+/*
+ * osi.c - _OSI implementation
+ *
+ * Copyright (C) 2016 Intel Corporation
+ * Author: Lv Zheng <lv.zheng@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+/* Uncomment next line to get verbose printout */
+/* #define DEBUG */
+#define pr_fmt(fmt) "ACPI: " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+
+#include "internal.h"
+
+
+#define OSI_STRING_LENGTH_MAX 64
+#define OSI_STRING_ENTRIES_MAX 16
+
+struct acpi_osi_entry {
+ char string[OSI_STRING_LENGTH_MAX];
+ bool enable;
+};
+
+static struct acpi_osi_config {
+ u8 default_disabling;
+ unsigned int linux_enable:1;
+ unsigned int linux_dmi:1;
+ unsigned int linux_cmdline:1;
+ unsigned int darwin_enable:1;
+ unsigned int darwin_dmi:1;
+ unsigned int darwin_cmdline:1;
+} osi_config;
+
+static struct acpi_osi_entry
+osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
+ {"Module Device", true},
+ {"Processor Device", true},
+ {"3.0 _SCP Extensions", true},
+ {"Processor Aggregator Device", true},
+};
+
+static u32 acpi_osi_handler(acpi_string interface, u32 supported)
+{
+ if (!strcmp("Linux", interface)) {
+ pr_notice_once(FW_BUG
+ "BIOS _OSI(Linux) query %s%s\n",
+ osi_config.linux_enable ? "honored" : "ignored",
+ osi_config.linux_cmdline ? " via cmdline" :
+ osi_config.linux_dmi ? " via DMI" : "");
+ }
+ if (!strcmp("Darwin", interface)) {
+ pr_notice_once(
+ "BIOS _OSI(Darwin) query %s%s\n",
+ osi_config.darwin_enable ? "honored" : "ignored",
+ osi_config.darwin_cmdline ? " via cmdline" :
+ osi_config.darwin_dmi ? " via DMI" : "");
+ }
+
+ return supported;
+}
+
+void __init acpi_osi_setup(char *str)
+{
+ struct acpi_osi_entry *osi;
+ bool enable = true;
+ int i;
+
+ if (!acpi_gbl_create_osi_method)
+ return;
+
+ if (str == NULL || *str == '\0') {
+ pr_info("_OSI method disabled\n");
+ acpi_gbl_create_osi_method = FALSE;
+ return;
+ }
+
+ if (*str == '!') {
+ str++;
+ if (*str == '\0') {
+ /* Do not override acpi_osi=!* */
+ if (!osi_config.default_disabling)
+ osi_config.default_disabling =
+ ACPI_DISABLE_ALL_VENDOR_STRINGS;
+ return;
+ } else if (*str == '*') {
+ osi_config.default_disabling = ACPI_DISABLE_ALL_STRINGS;
+ for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+ osi = &osi_setup_entries[i];
+ osi->enable = false;
+ }
+ return;
+ } else if (*str == '!') {
+ osi_config.default_disabling = 0;
+ return;
+ }
+ enable = false;
+ }
+
+ for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+ osi = &osi_setup_entries[i];
+ if (!strcmp(osi->string, str)) {
+ osi->enable = enable;
+ break;
+ } else if (osi->string[0] == '\0') {
+ osi->enable = enable;
+ strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
+ break;
+ }
+ }
+}
+
+static void __init __acpi_osi_setup_darwin(bool enable)
+{
+ osi_config.darwin_enable = !!enable;
+ if (enable) {
+ acpi_osi_setup("!");
+ acpi_osi_setup("Darwin");
+ } else {
+ acpi_osi_setup("!!");
+ acpi_osi_setup("!Darwin");
+ }
+}
+
+static void __init acpi_osi_setup_darwin(bool enable)
+{
+ /* Override acpi_osi_dmi_blacklisted() */
+ osi_config.darwin_dmi = 0;
+ osi_config.darwin_cmdline = 1;
+ __acpi_osi_setup_darwin(enable);
+}
+
+/*
+ * The story of _OSI(Linux)
+ *
+ * From pre-history through Linux-2.6.22, Linux responded TRUE upon a BIOS
+ * OSI(Linux) query.
+ *
+ * Unfortunately, reference BIOS writers got wind of this and put
+ * OSI(Linux) in their example code, quickly exposing this string as
+ * ill-conceived and opening the door to an un-bounded number of BIOS
+ * incompatibilities.
+ *
+ * For example, OSI(Linux) was used on resume to re-POST a video card on
+ * one system, because Linux at that time could not do a speedy restore in
+ * its native driver. But then upon gaining quick native restore
+ * capability, Linux has no way to tell the BIOS to skip the time-consuming
+ * POST -- putting Linux at a permanent performance disadvantage. On
+ * another system, the BIOS writer used OSI(Linux) to infer native OS
+ * support for IPMI! On other systems, OSI(Linux) simply got in the way of
+ * Linux claiming to be compatible with other operating systems, exposing
+ * BIOS issues such as skipped device initialization.
+ *
+ * So "Linux" turned out to be a really poor chose of OSI string, and from
+ * Linux-2.6.23 onward we respond FALSE.
+ *
+ * BIOS writers should NOT query _OSI(Linux) on future systems. Linux will
+ * complain on the console when it sees it, and return FALSE. To get Linux
+ * to return TRUE for your system will require a kernel source update to
+ * add a DMI entry, or boot with "acpi_osi=Linux"
+ */
+static void __init __acpi_osi_setup_linux(bool enable)
+{
+ osi_config.linux_enable = !!enable;
+ if (enable)
+ acpi_osi_setup("Linux");
+ else
+ acpi_osi_setup("!Linux");
+}
+
+static void __init acpi_osi_setup_linux(bool enable)
+{
+ /* Override acpi_osi_dmi_blacklisted() */
+ osi_config.linux_dmi = 0;
+ osi_config.linux_cmdline = 1;
+ __acpi_osi_setup_linux(enable);
+}
+
+/*
+ * Modify the list of "OS Interfaces" reported to BIOS via _OSI
+ *
+ * empty string disables _OSI
+ * string starting with '!' disables that string
+ * otherwise string is added to list, augmenting built-in strings
+ */
+static void __init acpi_osi_setup_late(void)
+{
+ struct acpi_osi_entry *osi;
+ char *str;
+ int i;
+ acpi_status status;
+
+ if (osi_config.default_disabling) {
+ status = acpi_update_interfaces(osi_config.default_disabling);
+ if (ACPI_SUCCESS(status))
+ pr_info("Disabled all _OSI OS vendors%s\n",
+ osi_config.default_disabling ==
+ ACPI_DISABLE_ALL_STRINGS ?
+ " and feature groups" : "");
+ }
+
+ for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+ osi = &osi_setup_entries[i];
+ str = osi->string;
+ if (*str == '\0')
+ break;
+ if (osi->enable) {
+ status = acpi_install_interface(str);
+ if (ACPI_SUCCESS(status))
+ pr_info("Added _OSI(%s)\n", str);
+ } else {
+ status = acpi_remove_interface(str);
+ if (ACPI_SUCCESS(status))
+ pr_info("Deleted _OSI(%s)\n", str);
+ }
+ }
+}
+
+static int __init osi_setup(char *str)
+{
+ if (str && !strcmp("Linux", str))
+ acpi_osi_setup_linux(true);
+ else if (str && !strcmp("!Linux", str))
+ acpi_osi_setup_linux(false);
+ else if (str && !strcmp("Darwin", str))
+ acpi_osi_setup_darwin(true);
+ else if (str && !strcmp("!Darwin", str))
+ acpi_osi_setup_darwin(false);
+ else
+ acpi_osi_setup(str);
+
+ return 1;
+}
+__setup("acpi_osi=", osi_setup);
+
+bool acpi_osi_is_win8(void)
+{
+ return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
+}
+EXPORT_SYMBOL(acpi_osi_is_win8);
+
+static void __init acpi_osi_dmi_darwin(bool enable,
+ const struct dmi_system_id *d)
+{
+ pr_notice("DMI detected to setup _OSI(\"Darwin\"): %s\n", d->ident);
+ osi_config.darwin_dmi = 1;
+ __acpi_osi_setup_darwin(enable);
+}
+
+void __init acpi_osi_dmi_linux(bool enable, const struct dmi_system_id *d)
+{
+ pr_notice("DMI detected to setup _OSI(\"Linux\"): %s\n", d->ident);
+ osi_config.linux_dmi = 1;
+ __acpi_osi_setup_linux(enable);
+}
+
+static int __init dmi_enable_osi_darwin(const struct dmi_system_id *d)
+{
+ acpi_osi_dmi_darwin(true, d);
+
+ return 0;
+}
+
+static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
+{
+ acpi_osi_dmi_linux(true, d);
+
+ return 0;
+}
+
+static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
+{
+ pr_notice("DMI detected: %s\n", d->ident);
+ acpi_osi_setup("!Windows 2006");
+ acpi_osi_setup("!Windows 2006 SP1");
+ acpi_osi_setup("!Windows 2006 SP2");
+
+ return 0;
+}
+
+static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
+{
+ pr_notice("DMI detected: %s\n", d->ident);
+ acpi_osi_setup("!Windows 2009");
+
+ return 0;
+}
+
+static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
+{
+ pr_notice("DMI detected: %s\n", d->ident);
+ acpi_osi_setup("!Windows 2012");
+
+ return 0;
+}
+
+/*
+ * Linux default _OSI response behavior is determined by this DMI table.
+ *
+ * Note that _OSI("Linux")/_OSI("Darwin") determined here can be overridden
+ * by acpi_osi=!Linux/acpi_osi=!Darwin command line options.
+ */
+static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Fujitsu Siemens",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
+ },
+ },
+ {
+ /*
+ * The MSI GX723 DSDT has an NVIF method that needs to be called by the
+ * Nvidia driver (e.g. nouveau) when the user presses the brightness
+ * hotkey. Currently nouveau does not do that, which leaves the DSDT in
+ * an infinite while loop when the hotkey is pressed. Add the MSI
+ * GX723's DMI information to this table to work around the issue.
+ * It will be removed once nouveau grows the required support.
+ */
+ .callback = dmi_disable_osi_vista,
+ .ident = "MSI GX723",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GX723"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Sony VGN-NS10J_S",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS10J_S"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Sony VGN-SR290J",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR290J"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "VGN-NS50B_L",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS50B_L"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "VGN-SR19XN",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR19XN"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Toshiba Satellite L355",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win7,
+ .ident = "ASUS K50IJ",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Toshiba P305D",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Toshiba NB100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "NB100"),
+ },
+ },
+
+ /*
+ * The wireless hotkey does not work on those machines when
+ * returning true for _OSI("Windows 2012")
+ */
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Inspiron 7737",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Inspiron 7537",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Inspiron 5437",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5437"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Inspiron 3437",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3437"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Vostro 3446",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Vostro 3546",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3546"),
+ },
+ },
+
+ /*
+ * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
+ * Linux ignores it, except for the machines enumerated below.
+ */
+
+ /*
+ * Without this quirk the EeePC exports a non-working WMI interface;
+ * with it, the working "good old" eeepc_laptop interface is exported
+ * instead, fixing both brightness control and rfkill.
+ */
+ {
+ .callback = dmi_enable_osi_linux,
+ .ident = "Asus EEE PC 1015PX",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
+ },
+ },
+
+ /*
+ * Enable _OSI("Darwin") for all Apple platforms.
+ */
+ {
+ .callback = dmi_enable_osi_darwin,
+ .ident = "Apple hardware",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ },
+ },
+ {
+ .callback = dmi_enable_osi_darwin,
+ .ident = "Apple hardware",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
+ },
+ },
+ {}
+};
+
+static __init void acpi_osi_dmi_blacklisted(void)
+{
+ dmi_check_system(acpi_osi_dmi_table);
+}
+
+int __init early_acpi_osi_init(void)
+{
+ acpi_osi_dmi_blacklisted();
+
+ return 0;
+}
+
+int __init acpi_osi_init(void)
+{
+ acpi_install_interface_handler(acpi_osi_handler);
+ acpi_osi_setup_late();
+
+ return 0;
+}
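
acpi_osi_setup() above records each requested interface string in a small fixed-size table: a string that is already present only has its enable flag updated, otherwise it is appended to the first free slot, and requests beyond the table size are silently dropped. A standalone sketch of that insert-or-update step, with illustrative names:

#include <stdbool.h>
#include <string.h>

#define MAX_ENTRIES	16
#define MAX_LEN		64

struct osi_entry {
	char string[MAX_LEN];
	bool enable;
};

static struct osi_entry entries[MAX_ENTRIES];

static void osi_set(const char *str, bool enable)
{
	int i;

	for (i = 0; i < MAX_ENTRIES; i++) {
		if (!strcmp(entries[i].string, str)) {
			/* Known string: just flip its enable bit. */
			entries[i].enable = enable;
			return;
		}
		if (entries[i].string[0] == '\0') {
			/* First free slot: record the new string. */
			entries[i].enable = enable;
			strncpy(entries[i].string, str, MAX_LEN - 1);
			return;
		}
	}
	/* Table full: the request is dropped, as in the driver above. */
}

For example, osi_set("Windows 2009", false) models what booting with acpi_osi=!"Windows 2009" requests.
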
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 814d5f83b75e..b108f1358a32 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -56,10 +56,6 @@ struct acpi_os_dpc {
struct work_struct work;
};
-#ifdef CONFIG_ACPI_CUSTOM_DSDT
-#include CONFIG_ACPI_CUSTOM_DSDT_FILE
-#endif
-
#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>
@@ -96,72 +92,6 @@ struct acpi_ioremap {
static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
-static void __init acpi_osi_setup_late(void);
-
-/*
- * The story of _OSI(Linux)
- *
- * From pre-history through Linux-2.6.22,
- * Linux responded TRUE upon a BIOS OSI(Linux) query.
- *
- * Unfortunately, reference BIOS writers got wind of this
- * and put OSI(Linux) in their example code, quickly exposing
- * this string as ill-conceived and opening the door to
- * an un-bounded number of BIOS incompatibilities.
- *
- * For example, OSI(Linux) was used on resume to re-POST a
- * video card on one system, because Linux at that time
- * could not do a speedy restore in its native driver.
- * But then upon gaining quick native restore capability,
- * Linux has no way to tell the BIOS to skip the time-consuming
- * POST -- putting Linux at a permanent performance disadvantage.
- * On another system, the BIOS writer used OSI(Linux)
- * to infer native OS support for IPMI! On other systems,
- * OSI(Linux) simply got in the way of Linux claiming to
- * be compatible with other operating systems, exposing
- * BIOS issues such as skipped device initialization.
- *
- * So "Linux" turned out to be a really poor chose of
- * OSI string, and from Linux-2.6.23 onward we respond FALSE.
- *
- * BIOS writers should NOT query _OSI(Linux) on future systems.
- * Linux will complain on the console when it sees it, and return FALSE.
- * To get Linux to return TRUE for your system will require
- * a kernel source update to add a DMI entry,
- * or boot with "acpi_osi=Linux"
- */
-
-static struct osi_linux {
- unsigned int enable:1;
- unsigned int dmi:1;
- unsigned int cmdline:1;
- unsigned int default_disabling:1;
-} osi_linux = {0, 0, 0, 0};
-
-static u32 acpi_osi_handler(acpi_string interface, u32 supported)
-{
- if (!strcmp("Linux", interface)) {
-
- printk_once(KERN_NOTICE FW_BUG PREFIX
- "BIOS _OSI(Linux) query %s%s\n",
- osi_linux.enable ? "honored" : "ignored",
- osi_linux.cmdline ? " via cmdline" :
- osi_linux.dmi ? " via DMI" : "");
- }
-
- if (!strcmp("Darwin", interface)) {
- /*
- * Apple firmware will behave poorly if it receives positive
- * answers to "Darwin" and any other OS. Respond positively
- * to Darwin and then disable all other vendor strings.
- */
- acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
- supported = ACPI_UINT32_MAX;
- }
-
- return supported;
-}
-
static void __init acpi_request_region (struct acpi_generic_address *gas,
unsigned int length, char *desc)
{
@@ -582,7 +512,7 @@ static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
- char **new_val)
+ acpi_string *new_val)
{
if (!init_val || !new_val)
return AE_BAD_PARAMETER;
@@ -602,280 +532,6 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
return AE_OK;
}
-static void acpi_table_taint(struct acpi_table_header *table)
-{
- pr_warn(PREFIX
- "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
- table->signature, table->oem_table_id);
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
-}
-
-#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
-#include <linux/earlycpio.h>
-#include <linux/memblock.h>
-
-static u64 acpi_tables_addr;
-static int all_tables_size;
-
-/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
-static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
-{
- u8 sum = 0;
- u8 *end = buffer + length;
-
- while (buffer < end)
- sum = (u8) (sum + *(buffer++));
- return sum;
-}
-
-/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
-static const char * const table_sigs[] = {
- ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
- ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
- ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
- ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
- ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
- ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
- ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
- ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
- ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
-
-#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
-
-#define ACPI_OVERRIDE_TABLES 64
-static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
-static DECLARE_BITMAP(acpi_initrd_installed, ACPI_OVERRIDE_TABLES);
-
-#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)
-
-void __init acpi_initrd_override(void *data, size_t size)
-{
- int sig, no, table_nr = 0, total_offset = 0;
- long offset = 0;
- struct acpi_table_header *table;
- char cpio_path[32] = "kernel/firmware/acpi/";
- struct cpio_data file;
-
- if (data == NULL || size == 0)
- return;
-
- for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
- file = find_cpio_data(cpio_path, data, size, &offset);
- if (!file.data)
- break;
-
- data += offset;
- size -= offset;
-
- if (file.size < sizeof(struct acpi_table_header)) {
- pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
- cpio_path, file.name);
- continue;
- }
-
- table = file.data;
-
- for (sig = 0; table_sigs[sig]; sig++)
- if (!memcmp(table->signature, table_sigs[sig], 4))
- break;
-
- if (!table_sigs[sig]) {
- pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
- cpio_path, file.name);
- continue;
- }
- if (file.size != table->length) {
- pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
- cpio_path, file.name);
- continue;
- }
- if (acpi_table_checksum(file.data, table->length)) {
- pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
- cpio_path, file.name);
- continue;
- }
-
- pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
- table->signature, cpio_path, file.name, table->length);
-
- all_tables_size += table->length;
- acpi_initrd_files[table_nr].data = file.data;
- acpi_initrd_files[table_nr].size = file.size;
- table_nr++;
- }
- if (table_nr == 0)
- return;
-
- acpi_tables_addr =
- memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
- all_tables_size, PAGE_SIZE);
- if (!acpi_tables_addr) {
- WARN_ON(1);
- return;
- }
- /*
- * Only calling e820_add_reserve does not work and the
- * tables are invalid (memory got used) later.
- * memblock_reserve works as expected and the tables won't get modified.
- * But it's not enough on X86 because ioremap will
- * complain later (used by acpi_os_map_memory) that the pages
- * that should get mapped are not marked "reserved".
- * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
- * works fine.
- */
- memblock_reserve(acpi_tables_addr, all_tables_size);
- arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
-
- /*
- * early_ioremap only can remap 256k one time. If we map all
- * tables one time, we will hit the limit. Need to map chunks
- * one by one during copying the same as that in relocate_initrd().
- */
- for (no = 0; no < table_nr; no++) {
- unsigned char *src_p = acpi_initrd_files[no].data;
- phys_addr_t size = acpi_initrd_files[no].size;
- phys_addr_t dest_addr = acpi_tables_addr + total_offset;
- phys_addr_t slop, clen;
- char *dest_p;
-
- total_offset += size;
-
- while (size) {
- slop = dest_addr & ~PAGE_MASK;
- clen = size;
- if (clen > MAP_CHUNK_SIZE - slop)
- clen = MAP_CHUNK_SIZE - slop;
- dest_p = early_ioremap(dest_addr & PAGE_MASK,
- clen + slop);
- memcpy(dest_p + slop, src_p, clen);
- early_iounmap(dest_p, clen + slop);
- src_p += clen;
- dest_addr += clen;
- size -= clen;
- }
- }
-}
-
-acpi_status
-acpi_os_physical_table_override(struct acpi_table_header *existing_table,
- acpi_physical_address *address, u32 *length)
-{
- int table_offset = 0;
- int table_index = 0;
- struct acpi_table_header *table;
- u32 table_length;
-
- *length = 0;
- *address = 0;
- if (!acpi_tables_addr)
- return AE_OK;
-
- while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
- table = acpi_os_map_memory(acpi_tables_addr + table_offset,
- ACPI_HEADER_SIZE);
- if (table_offset + table->length > all_tables_size) {
- acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
- WARN_ON(1);
- return AE_OK;
- }
-
- table_length = table->length;
-
- /* Only override tables matched */
- if (test_bit(table_index, acpi_initrd_installed) ||
- memcmp(existing_table->signature, table->signature, 4) ||
- memcmp(table->oem_table_id, existing_table->oem_table_id,
- ACPI_OEM_TABLE_ID_SIZE)) {
- acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
- goto next_table;
- }
-
- *length = table_length;
- *address = acpi_tables_addr + table_offset;
- acpi_table_taint(existing_table);
- acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
- set_bit(table_index, acpi_initrd_installed);
- break;
-
-next_table:
- table_offset += table_length;
- table_index++;
- }
- return AE_OK;
-}
-
-void __init acpi_initrd_initialize_tables(void)
-{
- int table_offset = 0;
- int table_index = 0;
- u32 table_length;
- struct acpi_table_header *table;
-
- if (!acpi_tables_addr)
- return;
-
- while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
- table = acpi_os_map_memory(acpi_tables_addr + table_offset,
- ACPI_HEADER_SIZE);
- if (table_offset + table->length > all_tables_size) {
- acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
- WARN_ON(1);
- return;
- }
-
- table_length = table->length;
-
- /* Skip RSDT/XSDT which should only be used for override */
- if (test_bit(table_index, acpi_initrd_installed) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) {
- acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
- goto next_table;
- }
-
- acpi_table_taint(table);
- acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
- acpi_install_table(acpi_tables_addr + table_offset, TRUE);
- set_bit(table_index, acpi_initrd_installed);
-next_table:
- table_offset += table_length;
- table_index++;
- }
-}
-#else
-acpi_status
-acpi_os_physical_table_override(struct acpi_table_header *existing_table,
- acpi_physical_address *address,
- u32 *table_length)
-{
- *table_length = 0;
- *address = 0;
- return AE_OK;
-}
-
-void __init acpi_initrd_initialize_tables(void)
-{
-}
-#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
-
-acpi_status
-acpi_os_table_override(struct acpi_table_header *existing_table,
- struct acpi_table_header **new_table)
-{
- if (!existing_table || !new_table)
- return AE_BAD_PARAMETER;
-
- *new_table = NULL;
-
-#ifdef CONFIG_ACPI_CUSTOM_DSDT
- if (strncmp(existing_table->signature, "DSDT", 4) == 0)
- *new_table = (struct acpi_table_header *)AmlCode;
-#endif
- if (*new_table != NULL)
- acpi_table_taint(existing_table);
- return AE_OK;
-}
-
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
u32 handled;
@@ -1717,156 +1373,6 @@ static int __init acpi_os_name_setup(char *str)
__setup("acpi_os_name=", acpi_os_name_setup);
-#define OSI_STRING_LENGTH_MAX 64 /* arbitrary */
-#define OSI_STRING_ENTRIES_MAX 16 /* arbitrary */
-
-struct osi_setup_entry {
- char string[OSI_STRING_LENGTH_MAX];
- bool enable;
-};
-
-static struct osi_setup_entry
- osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
- {"Module Device", true},
- {"Processor Device", true},
- {"3.0 _SCP Extensions", true},
- {"Processor Aggregator Device", true},
-};
-
-void __init acpi_osi_setup(char *str)
-{
- struct osi_setup_entry *osi;
- bool enable = true;
- int i;
-
- if (!acpi_gbl_create_osi_method)
- return;
-
- if (str == NULL || *str == '\0') {
- printk(KERN_INFO PREFIX "_OSI method disabled\n");
- acpi_gbl_create_osi_method = FALSE;
- return;
- }
-
- if (*str == '!') {
- str++;
- if (*str == '\0') {
- osi_linux.default_disabling = 1;
- return;
- } else if (*str == '*') {
- acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
- for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
- osi = &osi_setup_entries[i];
- osi->enable = false;
- }
- return;
- }
- enable = false;
- }
-
- for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
- osi = &osi_setup_entries[i];
- if (!strcmp(osi->string, str)) {
- osi->enable = enable;
- break;
- } else if (osi->string[0] == '\0') {
- osi->enable = enable;
- strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
- break;
- }
- }
-}
-
-static void __init set_osi_linux(unsigned int enable)
-{
- if (osi_linux.enable != enable)
- osi_linux.enable = enable;
-
- if (osi_linux.enable)
- acpi_osi_setup("Linux");
- else
- acpi_osi_setup("!Linux");
-
- return;
-}
-
-static void __init acpi_cmdline_osi_linux(unsigned int enable)
-{
- osi_linux.cmdline = 1; /* cmdline set the default and override DMI */
- osi_linux.dmi = 0;
- set_osi_linux(enable);
-
- return;
-}
-
-void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
-{
- printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
-
- if (enable == -1)
- return;
-
- osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */
- set_osi_linux(enable);
-
- return;
-}
-
-/*
- * Modify the list of "OS Interfaces" reported to BIOS via _OSI
- *
- * empty string disables _OSI
- * string starting with '!' disables that string
- * otherwise string is added to list, augmenting built-in strings
- */
-static void __init acpi_osi_setup_late(void)
-{
- struct osi_setup_entry *osi;
- char *str;
- int i;
- acpi_status status;
-
- if (osi_linux.default_disabling) {
- status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
-
- if (ACPI_SUCCESS(status))
- printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
- }
-
- for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
- osi = &osi_setup_entries[i];
- str = osi->string;
-
- if (*str == '\0')
- break;
- if (osi->enable) {
- status = acpi_install_interface(str);
-
- if (ACPI_SUCCESS(status))
- printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
- } else {
- status = acpi_remove_interface(str);
-
- if (ACPI_SUCCESS(status))
- printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
- }
- }
-}
-
-static int __init osi_setup(char *str)
-{
- if (str && !strcmp("Linux", str))
- acpi_cmdline_osi_linux(1);
- else if (str && !strcmp("!Linux", str))
- acpi_cmdline_osi_linux(0);
- else
- acpi_osi_setup(str);
-
- return 1;
-}
-
-__setup("acpi_osi=", osi_setup);
-
/*
* Disable the auto-serialization of named objects creation methods.
*
@@ -1986,12 +1492,6 @@ int acpi_resources_are_enforced(void)
}
EXPORT_SYMBOL(acpi_resources_are_enforced);
-bool acpi_osi_is_win8(void)
-{
- return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
-}
-EXPORT_SYMBOL(acpi_osi_is_win8);
-
/*
* Deallocate the memory for a spinlock.
*/
@@ -2157,8 +1657,7 @@ acpi_status __init acpi_os_initialize1(void)
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
BUG_ON(!kacpi_hotplug_wq);
- acpi_install_interface_handler(acpi_osi_handler);
- acpi_osi_setup_late();
+ acpi_osi_init();
return AE_OK;
}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index ededa909df2f..8fc7323ed3e8 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -36,6 +36,7 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/irq.h>
#include "internal.h"
@@ -437,17 +438,15 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
* enabled system.
*/
-#define ACPI_MAX_IRQS 256
-#define ACPI_MAX_ISA_IRQ 16
+#define ACPI_MAX_ISA_IRQS 16
-#define PIRQ_PENALTY_PCI_AVAILABLE (0)
#define PIRQ_PENALTY_PCI_POSSIBLE (16*16)
#define PIRQ_PENALTY_PCI_USING (16*16*16)
#define PIRQ_PENALTY_ISA_TYPICAL (16*16*16*16)
#define PIRQ_PENALTY_ISA_USED (16*16*16*16*16)
#define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16)
-static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
+static int acpi_isa_irq_penalty[ACPI_MAX_ISA_IRQS] = {
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */
@@ -457,9 +456,9 @@ static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
PIRQ_PENALTY_ISA_TYPICAL, /* IRQ6 */
PIRQ_PENALTY_ISA_TYPICAL, /* IRQ7 parallel, spurious */
PIRQ_PENALTY_ISA_TYPICAL, /* IRQ8 rtc, sometimes */
- PIRQ_PENALTY_PCI_AVAILABLE, /* IRQ9 PCI, often acpi */
- PIRQ_PENALTY_PCI_AVAILABLE, /* IRQ10 PCI */
- PIRQ_PENALTY_PCI_AVAILABLE, /* IRQ11 PCI */
+ 0, /* IRQ9 PCI, often acpi */
+ 0, /* IRQ10 PCI */
+ 0, /* IRQ11 PCI */
PIRQ_PENALTY_ISA_USED, /* IRQ12 mouse */
PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */
PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */
@@ -467,39 +466,58 @@ static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
/* >IRQ15 */
};
-int __init acpi_irq_penalty_init(void)
+static int acpi_irq_pci_sharing_penalty(int irq)
{
struct acpi_pci_link *link;
- int i;
+ int penalty = 0;
- /*
- * Update penalties to facilitate IRQ balancing.
- */
list_for_each_entry(link, &acpi_link_list, list) {
-
/*
- * reflect the possible and active irqs in the penalty table --
- * useful for breaking ties.
+ * If a link is active, penalize its IRQ heavily
+ * so we try to choose a different IRQ.
*/
- if (link->irq.possible_count) {
- int penalty =
- PIRQ_PENALTY_PCI_POSSIBLE /
- link->irq.possible_count;
-
- for (i = 0; i < link->irq.possible_count; i++) {
- if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ)
- acpi_irq_penalty[link->irq.
- possible[i]] +=
- penalty;
- }
-
- } else if (link->irq.active) {
- acpi_irq_penalty[link->irq.active] +=
- PIRQ_PENALTY_PCI_POSSIBLE;
+ if (link->irq.active && link->irq.active == irq)
+ penalty += PIRQ_PENALTY_PCI_USING;
+ else {
+ int i;
+
+ /*
+ * If a link is inactive, penalize the IRQs it
+ * might use, but not as severely.
+ */
+ for (i = 0; i < link->irq.possible_count; i++)
+ if (link->irq.possible[i] == irq)
+ penalty += PIRQ_PENALTY_PCI_POSSIBLE /
+ link->irq.possible_count;
}
}
- return 0;
+ return penalty;
+}
+
+static int acpi_irq_get_penalty(int irq)
+{
+ int penalty = 0;
+
+ if (irq < ACPI_MAX_ISA_IRQS)
+ penalty += acpi_isa_irq_penalty[irq];
+
+ /*
+ * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
+ * with PCI IRQ attributes, mark the ACPI SCI as ISA_ALWAYS so it won't
+ * be used for PCI IRQs.
+ */
+ if (irq == acpi_gbl_FADT.sci_interrupt) {
+ u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK;
+
+ if (type != IRQ_TYPE_LEVEL_LOW)
+ penalty += PIRQ_PENALTY_ISA_ALWAYS;
+ else
+ penalty += PIRQ_PENALTY_PCI_USING;
+ }
+
+ penalty += acpi_irq_pci_sharing_penalty(irq);
+ return penalty;
}
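
Instead of maintaining a penalty counter for every possible IRQ, the code above derives the PCI sharing penalty on demand from the link list, so only the 16 ISA IRQs keep a static table. A simplified model of that on-demand computation; the types and constants below are stand-ins, not the kernel's.

#define PENALTY_PCI_POSSIBLE	(16 * 16)
#define PENALTY_PCI_USING	(16 * 16 * 16)

struct link {
	int active_irq;			/* 0 when the link is disabled */
	int possible[16];
	int possible_count;
	struct link *next;
};

static int pci_sharing_penalty(const struct link *links, int irq)
{
	const struct link *l;
	int penalty = 0;

	for (l = links; l; l = l->next) {
		if (l->active_irq && l->active_irq == irq) {
			/* An active link on this IRQ: discourage sharing it. */
			penalty += PENALTY_PCI_USING;
		} else {
			int i;

			/* Inactive link: spread a smaller penalty over candidates. */
			for (i = 0; i < l->possible_count; i++)
				if (l->possible[i] == irq)
					penalty += PENALTY_PCI_POSSIBLE /
						   l->possible_count;
		}
	}
	return penalty;
}
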
static int acpi_irq_balance = -1; /* 0: static, 1: balance */
@@ -547,12 +565,12 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
* the use of IRQs 9, 10, 11, and >15.
*/
for (i = (link->irq.possible_count - 1); i >= 0; i--) {
- if (acpi_irq_penalty[irq] >
- acpi_irq_penalty[link->irq.possible[i]])
+ if (acpi_irq_get_penalty(irq) >
+ acpi_irq_get_penalty(link->irq.possible[i]))
irq = link->irq.possible[i];
}
}
- if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
+ if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) {
printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
"Try pci=noacpi or acpi=off\n",
acpi_device_name(link->device),
@@ -568,7 +586,6 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
acpi_device_bid(link->device));
return -ENODEV;
} else {
- acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING;
printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active);
@@ -778,7 +795,7 @@ static void acpi_pci_link_remove(struct acpi_device *device)
}
/*
- * modify acpi_irq_penalty[] from cmdline
+ * modify acpi_isa_irq_penalty[] from cmdline
*/
static int __init acpi_irq_penalty_update(char *str, int used)
{
@@ -787,23 +804,24 @@ static int __init acpi_irq_penalty_update(char *str, int used)
for (i = 0; i < 16; i++) {
int retval;
int irq;
+ int new_penalty;
retval = get_option(&str, &irq);
if (!retval)
break; /* no number found */
- if (irq < 0)
- continue;
-
- if (irq >= ARRAY_SIZE(acpi_irq_penalty))
+ /* see if this is an ISA IRQ */
+ if ((irq < 0) || (irq >= ACPI_MAX_ISA_IRQS))
continue;
if (used)
- acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
+ new_penalty = acpi_irq_get_penalty(irq) +
+ PIRQ_PENALTY_ISA_USED;
else
- acpi_irq_penalty[irq] = PIRQ_PENALTY_PCI_AVAILABLE;
+ new_penalty = 0;
+ acpi_isa_irq_penalty[irq] = new_penalty;
if (retval != 2) /* no next number */
break;
}
@@ -819,34 +837,15 @@ static int __init acpi_irq_penalty_update(char *str, int used)
*/
void acpi_penalize_isa_irq(int irq, int active)
{
- if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
- if (active)
- acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
- else
- acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
- }
+ if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
+ acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
+			(active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
}
bool acpi_isa_irq_available(int irq)
{
- return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
- acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
-}
-
-/*
- * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
- * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for
- * PCI IRQs.
- */
-void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
-{
- if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
- if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
- polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
- acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
- else
- acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
- }
+ return irq >= 0 && (irq >= ARRAY_SIZE(acpi_isa_irq_penalty) ||
+ acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
}
/*
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 2a8b59644297..7a2e4d45b266 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -26,6 +26,11 @@
#include "internal.h"
#include "sleep.h"
+/*
+ * Some HW-full platforms do not have _S5, so they may need
+ * to leverage EFI power off for a shutdown.
+ */
+bool acpi_no_s5;
static u8 sleep_states[ACPI_S_STATE_COUNT];
static void acpi_sleep_tts_switch(u32 acpi_state)
@@ -882,6 +887,8 @@ int __init acpi_sleep_init(void)
sleep_states[ACPI_STATE_S5] = 1;
pm_power_off_prepare = acpi_power_off_prepare;
pm_power_off = acpi_power_off;
+ } else {
+ acpi_no_s5 = true;
}
supported[0] = 0;
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 0243d375c6fd..4b3a9e27f1b6 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -555,23 +555,22 @@ static void acpi_global_event_handler(u32 event_type, acpi_handle device,
static int get_status(u32 index, acpi_event_status *status,
acpi_handle *handle)
{
- int result = 0;
+ int result;
if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
- goto end;
+ return -EINVAL;
if (index < num_gpes) {
result = acpi_get_gpe_device(index, handle);
if (result) {
ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
"Invalid GPE 0x%x", index));
- goto end;
+ return result;
}
result = acpi_get_gpe_status(*handle, index, status);
} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
result = acpi_get_event_status(index - num_gpes, status);
-end:
return result;
}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f49c02442d65..a372f9eaa15d 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -32,8 +32,14 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
+#include <linux/earlycpio.h>
+#include <linux/memblock.h>
#include "internal.h"
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+#include CONFIG_ACPI_CUSTOM_DSDT_FILE
+#endif
+
#define ACPI_MAX_TABLES 128
static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
@@ -433,6 +439,314 @@ static void __init check_multiple_madt(void)
return;
}
+static void acpi_table_taint(struct acpi_table_header *table)
+{
+ pr_warn("Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
+ table->signature, table->oem_table_id);
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
+}
+
+#ifdef CONFIG_ACPI_TABLE_UPGRADE
+static u64 acpi_tables_addr;
+static int all_tables_size;
+
+/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
+static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
+{
+ u8 sum = 0;
+ u8 *end = buffer + length;
+
+ while (buffer < end)
+ sum = (u8) (sum + *(buffer++));
+ return sum;
+}
+
+/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
+static const char * const table_sigs[] = {
+ ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
+ ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
+ ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
+ ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
+ ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
+ ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
+ ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
+ ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
+ ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
+
+#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
+
+#define NR_ACPI_INITRD_TABLES 64
+static struct cpio_data __initdata acpi_initrd_files[NR_ACPI_INITRD_TABLES];
+static DECLARE_BITMAP(acpi_initrd_installed, NR_ACPI_INITRD_TABLES);
+
+#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)
+
+static void __init acpi_table_initrd_init(void *data, size_t size)
+{
+ int sig, no, table_nr = 0, total_offset = 0;
+ long offset = 0;
+ struct acpi_table_header *table;
+ char cpio_path[32] = "kernel/firmware/acpi/";
+ struct cpio_data file;
+
+ if (data == NULL || size == 0)
+ return;
+
+ for (no = 0; no < NR_ACPI_INITRD_TABLES; no++) {
+ file = find_cpio_data(cpio_path, data, size, &offset);
+ if (!file.data)
+ break;
+
+ data += offset;
+ size -= offset;
+
+ if (file.size < sizeof(struct acpi_table_header)) {
+ pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
+ cpio_path, file.name);
+ continue;
+ }
+
+ table = file.data;
+
+ for (sig = 0; table_sigs[sig]; sig++)
+ if (!memcmp(table->signature, table_sigs[sig], 4))
+ break;
+
+ if (!table_sigs[sig]) {
+ pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
+ cpio_path, file.name);
+ continue;
+ }
+ if (file.size != table->length) {
+ pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
+ cpio_path, file.name);
+ continue;
+ }
+ if (acpi_table_checksum(file.data, table->length)) {
+ pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
+ cpio_path, file.name);
+ continue;
+ }
+
+ pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
+ table->signature, cpio_path, file.name, table->length);
+
+ all_tables_size += table->length;
+ acpi_initrd_files[table_nr].data = file.data;
+ acpi_initrd_files[table_nr].size = file.size;
+ table_nr++;
+ }
+ if (table_nr == 0)
+ return;
+
+ acpi_tables_addr =
+ memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
+ all_tables_size, PAGE_SIZE);
+ if (!acpi_tables_addr) {
+ WARN_ON(1);
+ return;
+ }
+ /*
+ * Calling only e820_add_reserve does not work: the tables end up
+ * invalid later because the memory gets reused.
+ * memblock_reserve works as expected and the tables won't get modified,
+ * but it's not enough on x86 because ioremap (used by
+ * acpi_os_map_memory) will later complain that the pages that should
+ * get mapped are not marked "reserved".
+ * Using both memblock_reserve and e820_add_region (via
+ * arch_reserve_mem_area) works fine.
+ */
+ memblock_reserve(acpi_tables_addr, all_tables_size);
+ arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
+
+ /*
+ * early_ioremap can only remap 256k at a time. If we mapped all the
+ * tables at once we would hit that limit, so map and copy them
+ * chunk by chunk, as relocate_initrd() does.
+ */
+ for (no = 0; no < table_nr; no++) {
+ unsigned char *src_p = acpi_initrd_files[no].data;
+ phys_addr_t size = acpi_initrd_files[no].size;
+ phys_addr_t dest_addr = acpi_tables_addr + total_offset;
+ phys_addr_t slop, clen;
+ char *dest_p;
+
+ total_offset += size;
+
+ while (size) {
+ slop = dest_addr & ~PAGE_MASK;
+ clen = size;
+ if (clen > MAP_CHUNK_SIZE - slop)
+ clen = MAP_CHUNK_SIZE - slop;
+ dest_p = early_ioremap(dest_addr & PAGE_MASK,
+ clen + slop);
+ memcpy(dest_p + slop, src_p, clen);
+ early_iounmap(dest_p, clen + slop);
+ src_p += clen;
+ dest_addr += clen;
+ size -= clen;
+ }
+ }
+}
+
+static acpi_status
+acpi_table_initrd_override(struct acpi_table_header *existing_table,
+ acpi_physical_address *address, u32 *length)
+{
+ int table_offset = 0;
+ int table_index = 0;
+ struct acpi_table_header *table;
+ u32 table_length;
+
+ *length = 0;
+ *address = 0;
+ if (!acpi_tables_addr)
+ return AE_OK;
+
+ while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
+ table = acpi_os_map_memory(acpi_tables_addr + table_offset,
+ ACPI_HEADER_SIZE);
+ if (table_offset + table->length > all_tables_size) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ WARN_ON(1);
+ return AE_OK;
+ }
+
+ table_length = table->length;
+
+ /* Only override tables matched */
+ if (memcmp(existing_table->signature, table->signature, 4) ||
+ memcmp(table->oem_id, existing_table->oem_id,
+ ACPI_OEM_ID_SIZE) ||
+ memcmp(table->oem_table_id, existing_table->oem_table_id,
+ ACPI_OEM_TABLE_ID_SIZE)) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ goto next_table;
+ }
+ /*
+ * Mark the table to avoid being used in
+ * acpi_table_initrd_scan() and check the revision.
+ */
+ if (test_and_set_bit(table_index, acpi_initrd_installed) ||
+ existing_table->oem_revision >= table->oem_revision) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ goto next_table;
+ }
+
+ *length = table_length;
+ *address = acpi_tables_addr + table_offset;
+ pr_info("Table Upgrade: override [%4.4s-%6.6s-%8.8s]\n",
+ table->signature, table->oem_id,
+ table->oem_table_id);
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ break;
+
+next_table:
+ table_offset += table_length;
+ table_index++;
+ }
+ return AE_OK;
+}
+
+static void __init acpi_table_initrd_scan(void)
+{
+ int table_offset = 0;
+ int table_index = 0;
+ u32 table_length;
+ struct acpi_table_header *table;
+
+ if (!acpi_tables_addr)
+ return;
+
+ while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
+ table = acpi_os_map_memory(acpi_tables_addr + table_offset,
+ ACPI_HEADER_SIZE);
+ if (table_offset + table->length > all_tables_size) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ WARN_ON(1);
+ return;
+ }
+
+ table_length = table->length;
+
+ /* Skip RSDT/XSDT which should only be used for override */
+ if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) ||
+ ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ goto next_table;
+ }
+ /*
+ * Mark the table so it is not used again by
+ * acpi_table_initrd_override(), although that cannot actually
+ * happen because override is disabled in acpi_install_table().
+ */
+ if (test_and_set_bit(table_index, acpi_initrd_installed)) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ goto next_table;
+ }
+
+ pr_info("Table Upgrade: install [%4.4s-%6.6s-%8.8s]\n",
+ table->signature, table->oem_id,
+ table->oem_table_id);
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ acpi_install_table(acpi_tables_addr + table_offset, TRUE);
+next_table:
+ table_offset += table_length;
+ table_index++;
+ }
+}
+#else
+static void __init acpi_table_initrd_init(void *data, size_t size)
+{
+}
+
+static acpi_status
+acpi_table_initrd_override(struct acpi_table_header *existing_table,
+ acpi_physical_address *address,
+ u32 *table_length)
+{
+ *table_length = 0;
+ *address = 0;
+ return AE_OK;
+}
+
+static void __init acpi_table_initrd_scan(void)
+{
+}
+#endif /* CONFIG_ACPI_TABLE_UPGRADE */
+
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header *existing_table,
+ acpi_physical_address *address,
+ u32 *table_length)
+{
+ return acpi_table_initrd_override(existing_table, address,
+ table_length);
+}
+
+acpi_status
+acpi_os_table_override(struct acpi_table_header *existing_table,
+ struct acpi_table_header **new_table)
+{
+ if (!existing_table || !new_table)
+ return AE_BAD_PARAMETER;
+
+ *new_table = NULL;
+
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+ if (strncmp(existing_table->signature, "DSDT", 4) == 0)
+ *new_table = (struct acpi_table_header *)AmlCode;
+#endif
+ if (*new_table != NULL)
+ acpi_table_taint(existing_table);
+ return AE_OK;
+}
+
+void __init early_acpi_table_init(void *data, size_t size)
+{
+ acpi_table_initrd_init(data, size);
+}
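
The upgrade path above replaces a firmware table only when the initrd copy matches it exactly (signature, OEM ID and OEM table ID) and carries a strictly newer OEM revision. A standalone sketch of that acceptance rule; the struct below only mirrors the relevant ACPI header fields and is not the kernel's type.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct tbl_id {
	char signature[4];
	char oem_id[6];
	char oem_table_id[8];
	uint32_t oem_revision;
};

static bool upgrade_wins(const struct tbl_id *fw, const struct tbl_id *initrd)
{
	if (memcmp(fw->signature, initrd->signature, 4) ||
	    memcmp(fw->oem_id, initrd->oem_id, 6) ||
	    memcmp(fw->oem_table_id, initrd->oem_table_id, 8))
		return false;	/* different table, leave it alone */

	/* Equal or older OEM revision: keep the firmware copy. */
	return initrd->oem_revision > fw->oem_revision;
}
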
+
/*
* acpi_table_init()
*
@@ -457,7 +771,7 @@ int __init acpi_table_init(void)
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
if (ACPI_FAILURE(status))
return -EINVAL;
- acpi_initrd_initialize_tables();
+ acpi_table_initrd_scan();
check_multiple_madt();
return 0;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 050673f0c0b3..ac832bf6f8c9 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -707,7 +707,7 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs)
EXPORT_SYMBOL(acpi_check_dsm);
/**
- * acpi_dev_present - Detect presence of a given ACPI device in the system.
+ * acpi_dev_found - Detect presence of a given ACPI device in the namespace.
* @hid: Hardware ID of the device.
*
* Return %true if the device was present at the moment of invocation.
@@ -719,7 +719,7 @@ EXPORT_SYMBOL(acpi_check_dsm);
* instead). Calling from module_init() is fine (which is synonymous
* with device_initcall()).
*/
-bool acpi_dev_present(const char *hid)
+bool acpi_dev_found(const char *hid)
{
struct acpi_device_bus_id *acpi_device_bus_id;
bool found = false;
@@ -734,7 +734,7 @@ bool acpi_dev_present(const char *hid)
return found;
}
-EXPORT_SYMBOL(acpi_dev_present);
+EXPORT_SYMBOL(acpi_dev_found);
/*
* acpi_backlight= handling, this is done here rather then in video_detect.c
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 1316ddd92fac..3d1327615f72 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -358,7 +358,7 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
if (!(video_caps & ACPI_VIDEO_BACKLIGHT))
return acpi_backlight_vendor;
- if (acpi_osi_is_win8() && backlight_device_registered(BACKLIGHT_RAW))
+ if (acpi_osi_is_win8() && backlight_device_get_by_type(BACKLIGHT_RAW))
return acpi_backlight_native;
return acpi_backlight_video;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f437afa17f2b..6482d47deb50 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -322,16 +322,16 @@ EXPORT_SYMBOL_GPL(platform_device_add_data);
/**
* platform_device_add_properties - add built-in properties to a platform device
* @pdev: platform device to add properties to
- * @pset: properties to add
+ * @properties: null terminated array of properties to add
*
- * The function will take deep copy of the properties in @pset and attach
- * the copy to the platform device. The memory associated with properties
- * will be freed when the platform device is released.
+ * The function will take a deep copy of @properties and attach the copy to
+ * the platform device. The memory associated with the properties will be
+ * freed when the platform device is released.
*/
int platform_device_add_properties(struct platform_device *pdev,
- const struct property_set *pset)
+ struct property_entry *properties)
{
- return device_add_property_set(&pdev->dev, pset);
+ return device_add_properties(&pdev->dev, properties);
}
EXPORT_SYMBOL_GPL(platform_device_add_properties);
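
With the property_set wrapper gone, callers hand platform_device_add_properties() a null-terminated array of struct property_entry, typically built with the PROPERTY_ENTRY_*() helpers. A hedged usage example follows; the device name and property strings are made up.

#include <linux/platform_device.h>
#include <linux/property.h>

/* Hypothetical properties for an example device; names are made up. */
static struct property_entry demo_properties[] = {
	PROPERTY_ENTRY_U32("linux,demo-delay-ms", 50),
	PROPERTY_ENTRY_STRING("linux,demo-mode", "fast"),
	{ }	/* the array must be null-terminated */
};

static int demo_register(void)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("demo-device", PLATFORM_DEVID_NONE);
	if (!pdev)
		return -ENOMEM;

	/* The array is deep-copied; it does not need to stay around. */
	ret = platform_device_add_properties(pdev, demo_properties);
	if (!ret)
		ret = platform_device_add(pdev);
	if (ret)
		platform_device_put(pdev);
	return ret;
}
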
@@ -447,7 +447,7 @@ void platform_device_del(struct platform_device *pdev)
release_resource(r);
}
- device_remove_property_set(&pdev->dev);
+ device_remove_properties(&pdev->dev);
}
}
EXPORT_SYMBOL_GPL(platform_device_del);
@@ -526,8 +526,9 @@ struct platform_device *platform_device_register_full(
if (ret)
goto err;
- if (pdevinfo->pset) {
- ret = platform_device_add_properties(pdev, pdevinfo->pset);
+ if (pdevinfo->properties) {
+ ret = platform_device_add_properties(pdev,
+ pdevinfo->properties);
if (ret)
goto err;
}
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 0e64a1b5e62a..3657ac1cb801 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -159,7 +159,7 @@ int of_pm_clk_add_clks(struct device *dev)
count = of_count_phandle_with_args(dev->of_node, "clocks",
"#clock-cells");
- if (count == 0)
+ if (count <= 0)
return -ENODEV;
clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 56705b52758e..de23b648fce3 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -229,17 +229,6 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
return ret;
}
-static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
-}
-
-static int genpd_restore_dev(struct generic_pm_domain *genpd,
- struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
-}
-
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
unsigned long val, void *ptr)
{
@@ -372,17 +361,63 @@ static void genpd_power_off_work_fn(struct work_struct *work)
}
/**
- * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
+ * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
+ * @dev: Device to handle.
+ */
+static int __genpd_runtime_suspend(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+
+ if (dev->type && dev->type->pm)
+ cb = dev->type->pm->runtime_suspend;
+ else if (dev->class && dev->class->pm)
+ cb = dev->class->pm->runtime_suspend;
+ else if (dev->bus && dev->bus->pm)
+ cb = dev->bus->pm->runtime_suspend;
+ else
+ cb = NULL;
+
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = dev->driver->pm->runtime_suspend;
+
+ return cb ? cb(dev) : 0;
+}
+
+/**
+ * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
+ * @dev: Device to handle.
+ */
+static int __genpd_runtime_resume(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+
+ if (dev->type && dev->type->pm)
+ cb = dev->type->pm->runtime_resume;
+ else if (dev->class && dev->class->pm)
+ cb = dev->class->pm->runtime_resume;
+ else if (dev->bus && dev->bus->pm)
+ cb = dev->bus->pm->runtime_resume;
+ else
+ cb = NULL;
+
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = dev->driver->pm->runtime_resume;
+
+ return cb ? cb(dev) : 0;
+}
+
+/**
+ * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
* @dev: Device to suspend.
*
* Carry out a runtime suspend of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a PM domain consisting of I/O devices.
*/
-static int pm_genpd_runtime_suspend(struct device *dev)
+static int genpd_runtime_suspend(struct device *dev)
{
struct generic_pm_domain *genpd;
- bool (*stop_ok)(struct device *__dev);
+ bool (*suspend_ok)(struct device *__dev);
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
bool runtime_pm = pm_runtime_enabled(dev);
ktime_t time_start;
@@ -401,21 +436,21 @@ static int pm_genpd_runtime_suspend(struct device *dev)
* runtime PM is disabled. Under these circumstances, we shall skip
* validating/measuring the PM QoS latency.
*/
- stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
- if (runtime_pm && stop_ok && !stop_ok(dev))
+ suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
+ if (runtime_pm && suspend_ok && !suspend_ok(dev))
return -EBUSY;
/* Measure suspend latency. */
if (runtime_pm)
time_start = ktime_get();
- ret = genpd_save_dev(genpd, dev);
+ ret = __genpd_runtime_suspend(dev);
if (ret)
return ret;
ret = genpd_stop_dev(genpd, dev);
if (ret) {
- genpd_restore_dev(genpd, dev);
+ __genpd_runtime_resume(dev);
return ret;
}
@@ -446,14 +481,14 @@ static int pm_genpd_runtime_suspend(struct device *dev)
}
/**
- * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+ * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
* @dev: Device to resume.
*
* Carry out a runtime resume of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a PM domain consisting of I/O devices.
*/
-static int pm_genpd_runtime_resume(struct device *dev)
+static int genpd_runtime_resume(struct device *dev)
{
struct generic_pm_domain *genpd;
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
@@ -491,7 +526,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
if (ret)
goto err_poweroff;
- ret = genpd_restore_dev(genpd, dev);
+ ret = __genpd_runtime_resume(dev);
if (ret)
goto err_stop;
@@ -695,15 +730,6 @@ static int pm_genpd_prepare(struct device *dev)
* at this point and a system wakeup event should be reported if it's
* set up to wake up the system from sleep states.
*/
- pm_runtime_get_noresume(dev);
- if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
- pm_wakeup_event(dev, 0);
-
- if (pm_wakeup_pending()) {
- pm_runtime_put(dev);
- return -EBUSY;
- }
-
if (resume_needed(dev, genpd))
pm_runtime_resume(dev);
@@ -716,10 +742,8 @@ static int pm_genpd_prepare(struct device *dev)
mutex_unlock(&genpd->lock);
- if (genpd->suspend_power_off) {
- pm_runtime_put_noidle(dev);
+ if (genpd->suspend_power_off)
return 0;
- }
/*
* The PM domain must be in the GPD_STATE_ACTIVE state at this point,
@@ -741,7 +765,6 @@ static int pm_genpd_prepare(struct device *dev)
pm_runtime_enable(dev);
}
- pm_runtime_put(dev);
return ret;
}
@@ -1427,54 +1450,6 @@ out:
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
-/* Default device callbacks for generic PM domains. */
-
-/**
- * pm_genpd_default_save_state - Default "save device state" for PM domains.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_save_state(struct device *dev)
-{
- int (*cb)(struct device *__dev);
-
- if (dev->type && dev->type->pm)
- cb = dev->type->pm->runtime_suspend;
- else if (dev->class && dev->class->pm)
- cb = dev->class->pm->runtime_suspend;
- else if (dev->bus && dev->bus->pm)
- cb = dev->bus->pm->runtime_suspend;
- else
- cb = NULL;
-
- if (!cb && dev->driver && dev->driver->pm)
- cb = dev->driver->pm->runtime_suspend;
-
- return cb ? cb(dev) : 0;
-}
-
-/**
- * pm_genpd_default_restore_state - Default PM domains "restore device state".
- * @dev: Device to handle.
- */
-static int pm_genpd_default_restore_state(struct device *dev)
-{
- int (*cb)(struct device *__dev);
-
- if (dev->type && dev->type->pm)
- cb = dev->type->pm->runtime_resume;
- else if (dev->class && dev->class->pm)
- cb = dev->class->pm->runtime_resume;
- else if (dev->bus && dev->bus->pm)
- cb = dev->bus->pm->runtime_resume;
- else
- cb = NULL;
-
- if (!cb && dev->driver && dev->driver->pm)
- cb = dev->driver->pm->runtime_resume;
-
- return cb ? cb(dev) : 0;
-}
-
/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
@@ -1498,8 +1473,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->device_count = 0;
genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = true;
- genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
- genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
+ genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
+ genpd->domain.ops.runtime_resume = genpd_runtime_resume;
genpd->domain.ops.prepare = pm_genpd_prepare;
genpd->domain.ops.suspend = pm_genpd_suspend;
genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
@@ -1520,8 +1495,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.restore_early = pm_genpd_resume_early;
genpd->domain.ops.restore = pm_genpd_resume;
genpd->domain.ops.complete = pm_genpd_complete;
- genpd->dev_ops.save_state = pm_genpd_default_save_state;
- genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
if (genpd->flags & GENPD_FLAG_PM_CLK) {
genpd->dev_ops.stop = pm_clk_suspend;
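
For context, a sketch of how a platform would still initialise a domain after this change; the domain name and power callbacks below are placeholders, and simple_qos_governor now gates runtime suspend through its ->suspend_ok hook:

static int my_pd_power_on(struct generic_pm_domain *genpd)
{
	return 0;		/* placeholder: ungate the hardware domain */
}

static int my_pd_power_off(struct generic_pm_domain *genpd)
{
	return 0;		/* placeholder: gate the hardware domain */
}

static struct generic_pm_domain my_pd = {
	.name      = "my_pd",
	.power_on  = my_pd_power_on,
	.power_off = my_pd_power_off,
};

static void my_pd_setup(void)
{
	/* false: the domain starts in the powered-on state */
	pm_genpd_init(&my_pd, &simple_qos_governor, false);
}
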
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 00a5436dd44b..2e0fce711135 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -37,10 +37,10 @@ static int dev_update_qos_constraint(struct device *dev, void *data)
}
/**
- * default_stop_ok - Default PM domain governor routine for stopping devices.
+ * default_suspend_ok - Default PM domain governor routine to suspend devices.
* @dev: Device to check.
*/
-static bool default_stop_ok(struct device *dev)
+static bool default_suspend_ok(struct device *dev)
{
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
unsigned long flags;
@@ -51,13 +51,13 @@ static bool default_stop_ok(struct device *dev)
spin_lock_irqsave(&dev->power.lock, flags);
if (!td->constraint_changed) {
- bool ret = td->cached_stop_ok;
+ bool ret = td->cached_suspend_ok;
spin_unlock_irqrestore(&dev->power.lock, flags);
return ret;
}
td->constraint_changed = false;
- td->cached_stop_ok = false;
+ td->cached_suspend_ok = false;
td->effective_constraint_ns = -1;
constraint_ns = __dev_pm_qos_read_value(dev);
@@ -83,13 +83,13 @@ static bool default_stop_ok(struct device *dev)
return false;
}
td->effective_constraint_ns = constraint_ns;
- td->cached_stop_ok = constraint_ns >= 0;
+ td->cached_suspend_ok = constraint_ns >= 0;
/*
* The children have been suspended already, so we don't need to take
- * their stop latencies into account here.
+ * their suspend latencies into account here.
*/
- return td->cached_stop_ok;
+ return td->cached_suspend_ok;
}
/**
@@ -150,7 +150,7 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
*/
td = &to_gpd_data(pdd)->td;
constraint_ns = td->effective_constraint_ns;
- /* default_stop_ok() need not be called before us. */
+ /* default_suspend_ok() need not be called before us. */
if (constraint_ns < 0) {
constraint_ns = dev_pm_qos_read_value(pdd->dev);
constraint_ns *= NSEC_PER_USEC;
@@ -227,7 +227,7 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
}
struct dev_power_governor simple_qos_governor = {
- .stop_ok = default_stop_ok,
+ .suspend_ok = default_suspend_ok,
.power_down_ok = default_power_down_ok,
};
@@ -236,5 +236,5 @@ struct dev_power_governor simple_qos_governor = {
*/
struct dev_power_governor pm_domain_always_on_gov = {
.power_down_ok = always_on_power_down_ok,
- .stop_ok = default_stop_ok,
+ .suspend_ok = default_suspend_ok,
};
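
A sketch of a custom governor under the renamed hook; both callbacks are hypothetical and simply allow every transition:

static bool my_suspend_ok(struct device *dev)
{
	return true;	/* always allow the device to runtime suspend */
}

static bool my_power_down_ok(struct dev_pm_domain *domain)
{
	return true;	/* always allow the whole domain to power off */
}

static struct dev_power_governor my_gov = {
	.suspend_ok    = my_suspend_ok,
	.power_down_ok = my_power_down_ok,
};
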
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 6e7c3ccea24b..c81667d4bb60 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1556,7 +1556,6 @@ int dpm_suspend(pm_message_t state)
static int device_prepare(struct device *dev, pm_message_t state)
{
int (*callback)(struct device *) = NULL;
- char *info = NULL;
int ret = 0;
if (dev->power.syscore)
@@ -1579,24 +1578,17 @@ static int device_prepare(struct device *dev, pm_message_t state)
goto unlock;
}
- if (dev->pm_domain) {
- info = "preparing power domain ";
+ if (dev->pm_domain)
callback = dev->pm_domain->ops.prepare;
- } else if (dev->type && dev->type->pm) {
- info = "preparing type ";
+ else if (dev->type && dev->type->pm)
callback = dev->type->pm->prepare;
- } else if (dev->class && dev->class->pm) {
- info = "preparing class ";
+ else if (dev->class && dev->class->pm)
callback = dev->class->pm->prepare;
- } else if (dev->bus && dev->bus->pm) {
- info = "preparing bus ";
+ else if (dev->bus && dev->bus->pm)
callback = dev->bus->pm->prepare;
- }
- if (!callback && dev->driver && dev->driver->pm) {
- info = "preparing driver ";
+ if (!callback && dev->driver && dev->driver->pm)
callback = dev->driver->pm->prepare;
- }
if (callback)
ret = callback(dev);
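
The lookup order (domain, type, class, bus, then driver) is unchanged; a driver-level sketch of providing a ->prepare callback, with my_device_busy() as a placeholder for real driver state:

static bool my_device_busy(struct device *dev)
{
	return false;		/* placeholder: report an in-flight transfer */
}

static int my_prepare(struct device *dev)
{
	/* Veto system suspend while the hypothetical device is mid-transfer */
	return my_device_busy(dev) ? -EBUSY : 0;
}

static const struct dev_pm_ops my_pm_ops = {
	.prepare = my_prepare,
};
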
diff --git a/drivers/base/power/opp/Makefile b/drivers/base/power/opp/Makefile
index 19837ef04d8e..e70ceb406fe9 100644
--- a/drivers/base/power/opp/Makefile
+++ b/drivers/base/power/opp/Makefile
@@ -1,3 +1,4 @@
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
obj-y += core.o cpu.o
+obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index d8f4cc22856c..7c04c87738a6 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -18,7 +18,6 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
-#include <linux/of.h>
#include <linux/export.h>
#include <linux/regulator/consumer.h>
@@ -29,7 +28,7 @@
* from here, with each opp_table containing the list of opps it supports in
* various states of availability.
*/
-static LIST_HEAD(opp_tables);
+LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
@@ -53,26 +52,6 @@ static struct opp_device *_find_opp_dev(const struct device *dev,
return NULL;
}
-static struct opp_table *_managed_opp(const struct device_node *np)
-{
- struct opp_table *opp_table;
-
- list_for_each_entry_rcu(opp_table, &opp_tables, node) {
- if (opp_table->np == np) {
- /*
- * Multiple devices can point to the same OPP table and
- * so will have same node-pointer, np.
- *
- * But the OPPs will be considered as shared only if the
- * OPP table contains a "opp-shared" property.
- */
- return opp_table->shared_opp ? opp_table : NULL;
- }
- }
-
- return NULL;
-}
-
/**
* _find_opp_table() - find opp_table struct using device pointer
* @dev: device pointer used to lookup OPP table
@@ -757,7 +736,6 @@ static struct opp_table *_add_opp_table(struct device *dev)
{
struct opp_table *opp_table;
struct opp_device *opp_dev;
- struct device_node *np;
int ret;
/* Check for existing table for 'dev' first */
@@ -781,20 +759,7 @@ static struct opp_table *_add_opp_table(struct device *dev)
return NULL;
}
- /*
- * Only required for backward compatibility with v1 bindings, but isn't
- * harmful for other cases. And so we do it unconditionally.
- */
- np = of_node_get(dev->of_node);
- if (np) {
- u32 val;
-
- if (!of_property_read_u32(np, "clock-latency", &val))
- opp_table->clock_latency_ns_max = val;
- of_property_read_u32(np, "voltage-tolerance",
- &opp_table->voltage_tolerance_v1);
- of_node_put(np);
- }
+ _of_init_opp_table(opp_table, dev);
/* Set regulator to a non-NULL error value */
opp_table->regulator = ERR_PTR(-ENXIO);
@@ -890,8 +855,8 @@ static void _kfree_opp_rcu(struct rcu_head *head)
* It is assumed that the caller holds required mutex for an RCU updater
* strategy.
*/
-static void _opp_remove(struct opp_table *opp_table,
- struct dev_pm_opp *opp, bool notify)
+void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
+ bool notify)
{
/*
* Notify the changes in the availability of the operable
@@ -952,8 +917,8 @@ unlock:
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
-static struct dev_pm_opp *_allocate_opp(struct device *dev,
- struct opp_table **opp_table)
+struct dev_pm_opp *_allocate_opp(struct device *dev,
+ struct opp_table **opp_table)
{
struct dev_pm_opp *opp;
@@ -989,8 +954,8 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
return true;
}
-static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
- struct opp_table *opp_table)
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
+ struct opp_table *opp_table)
{
struct dev_pm_opp *opp;
struct list_head *head = &opp_table->opp_list;
@@ -1066,8 +1031,8 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
* Duplicate OPPs (both freq and volt are same) and !opp->available
* -ENOMEM Memory allocation failure
*/
-static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
- bool dynamic)
+int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
+ bool dynamic)
{
struct opp_table *opp_table;
struct dev_pm_opp *new_opp;
@@ -1112,83 +1077,6 @@ unlock:
return ret;
}
-/* TODO: Support multiple regulators */
-static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
- struct opp_table *opp_table)
-{
- u32 microvolt[3] = {0};
- u32 val;
- int count, ret;
- struct property *prop = NULL;
- char name[NAME_MAX];
-
- /* Search for "opp-microvolt-<name>" */
- if (opp_table->prop_name) {
- snprintf(name, sizeof(name), "opp-microvolt-%s",
- opp_table->prop_name);
- prop = of_find_property(opp->np, name, NULL);
- }
-
- if (!prop) {
- /* Search for "opp-microvolt" */
- sprintf(name, "opp-microvolt");
- prop = of_find_property(opp->np, name, NULL);
-
- /* Missing property isn't a problem, but an invalid entry is */
- if (!prop)
- return 0;
- }
-
- count = of_property_count_u32_elems(opp->np, name);
- if (count < 0) {
- dev_err(dev, "%s: Invalid %s property (%d)\n",
- __func__, name, count);
- return count;
- }
-
- /* There can be one or three elements here */
- if (count != 1 && count != 3) {
- dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
- __func__, name, count);
- return -EINVAL;
- }
-
- ret = of_property_read_u32_array(opp->np, name, microvolt, count);
- if (ret) {
- dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
- return -EINVAL;
- }
-
- opp->u_volt = microvolt[0];
-
- if (count == 1) {
- opp->u_volt_min = opp->u_volt;
- opp->u_volt_max = opp->u_volt;
- } else {
- opp->u_volt_min = microvolt[1];
- opp->u_volt_max = microvolt[2];
- }
-
- /* Search for "opp-microamp-<name>" */
- prop = NULL;
- if (opp_table->prop_name) {
- snprintf(name, sizeof(name), "opp-microamp-%s",
- opp_table->prop_name);
- prop = of_find_property(opp->np, name, NULL);
- }
-
- if (!prop) {
- /* Search for "opp-microamp" */
- sprintf(name, "opp-microamp");
- prop = of_find_property(opp->np, name, NULL);
- }
-
- if (prop && !of_property_read_u32(opp->np, name, &val))
- opp->u_amp = val;
-
- return 0;
-}
-
/**
* dev_pm_opp_set_supported_hw() - Set supported platforms
* @dev: Device for which supported-hw has to be set.
@@ -1517,144 +1405,6 @@ unlock:
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
-static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
- struct device_node *np)
-{
- unsigned int count = opp_table->supported_hw_count;
- u32 version;
- int ret;
-
- if (!opp_table->supported_hw)
- return true;
-
- while (count--) {
- ret = of_property_read_u32_index(np, "opp-supported-hw", count,
- &version);
- if (ret) {
- dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
- __func__, count, ret);
- return false;
- }
-
- /* Both of these are bitwise masks of the versions */
- if (!(version & opp_table->supported_hw[count]))
- return false;
- }
-
- return true;
-}
-
-/**
- * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
- * @dev: device for which we do this operation
- * @np: device node
- *
- * This function adds an opp definition to the opp table and returns status. The
- * opp can be controlled using dev_pm_opp_enable/disable functions and may be
- * removed by dev_pm_opp_remove.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
- *
- * Return:
- * 0 On success OR
- * Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST Freq are same and volt are different OR
- * Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM Memory allocation failure
- * -EINVAL Failed parsing the OPP node
- */
-static int _opp_add_static_v2(struct device *dev, struct device_node *np)
-{
- struct opp_table *opp_table;
- struct dev_pm_opp *new_opp;
- u64 rate;
- u32 val;
- int ret;
-
- /* Hold our table modification lock here */
- mutex_lock(&opp_table_lock);
-
- new_opp = _allocate_opp(dev, &opp_table);
- if (!new_opp) {
- ret = -ENOMEM;
- goto unlock;
- }
-
- ret = of_property_read_u64(np, "opp-hz", &rate);
- if (ret < 0) {
- dev_err(dev, "%s: opp-hz not found\n", __func__);
- goto free_opp;
- }
-
- /* Check if the OPP supports hardware's hierarchy of versions or not */
- if (!_opp_is_supported(dev, opp_table, np)) {
- dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
- goto free_opp;
- }
-
- /*
- * Rate is defined as an unsigned long in clk API, and so casting
- * explicitly to its type. Must be fixed once rate is 64 bit
- * guaranteed in clk API.
- */
- new_opp->rate = (unsigned long)rate;
- new_opp->turbo = of_property_read_bool(np, "turbo-mode");
-
- new_opp->np = np;
- new_opp->dynamic = false;
- new_opp->available = true;
-
- if (!of_property_read_u32(np, "clock-latency-ns", &val))
- new_opp->clock_latency_ns = val;
-
- ret = opp_parse_supplies(new_opp, dev, opp_table);
- if (ret)
- goto free_opp;
-
- ret = _opp_add(dev, new_opp, opp_table);
- if (ret)
- goto free_opp;
-
- /* OPP to select on device suspend */
- if (of_property_read_bool(np, "opp-suspend")) {
- if (opp_table->suspend_opp) {
- dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
- __func__, opp_table->suspend_opp->rate,
- new_opp->rate);
- } else {
- new_opp->suspend = true;
- opp_table->suspend_opp = new_opp;
- }
- }
-
- if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
- opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
-
- mutex_unlock(&opp_table_lock);
-
- pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
- __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
- new_opp->u_volt_min, new_opp->u_volt_max,
- new_opp->clock_latency_ns);
-
- /*
- * Notify the changes in the availability of the operable
- * frequency/voltage list.
- */
- srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
- return 0;
-
-free_opp:
- _opp_remove(opp_table, new_opp, false);
-unlock:
- mutex_unlock(&opp_table_lock);
- return ret;
-}
-
/**
* dev_pm_opp_add() - Add an OPP table from a table definitions
* @dev: device for which we do this operation
@@ -1842,21 +1592,11 @@ struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
-#ifdef CONFIG_OF
-/**
- * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
- * entries
- * @dev: device pointer used to lookup OPP table.
- *
- * Free OPPs created using static entries present in DT.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function indirectly uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
+/*
+ * Free OPPs created from static entries present in DT and, depending on the
+ * remove_all parameter, the dynamically added entries as well.
*/
-void dev_pm_opp_of_remove_table(struct device *dev)
+void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
{
struct opp_table *opp_table;
struct dev_pm_opp *opp, *tmp;
@@ -1881,7 +1621,7 @@ void dev_pm_opp_of_remove_table(struct device *dev)
if (list_is_singular(&opp_table->dev_list)) {
/* Free static OPPs */
list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
- if (!opp->dynamic)
+ if (remove_all || !opp->dynamic)
_opp_remove(opp_table, opp, true);
}
} else {
@@ -1891,160 +1631,22 @@ void dev_pm_opp_of_remove_table(struct device *dev)
unlock:
mutex_unlock(&opp_table_lock);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-
-/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *_of_get_opp_desc_node(struct device *dev)
-{
- /*
- * TODO: Support for multiple OPP tables.
- *
- * There should be only ONE phandle present in "operating-points-v2"
- * property.
- */
-
- return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
-}
-
-/* Initializes OPP tables based on new bindings */
-static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
-{
- struct device_node *np;
- struct opp_table *opp_table;
- int ret = 0, count = 0;
-
- mutex_lock(&opp_table_lock);
-
- opp_table = _managed_opp(opp_np);
- if (opp_table) {
- /* OPPs are already managed */
- if (!_add_opp_dev(dev, opp_table))
- ret = -ENOMEM;
- mutex_unlock(&opp_table_lock);
- return ret;
- }
- mutex_unlock(&opp_table_lock);
-
- /* We have opp-table node now, iterate over it and add OPPs */
- for_each_available_child_of_node(opp_np, np) {
- count++;
-
- ret = _opp_add_static_v2(dev, np);
- if (ret) {
- dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
- ret);
- goto free_table;
- }
- }
-
- /* There should be one of more OPP defined */
- if (WARN_ON(!count))
- return -ENOENT;
-
- mutex_lock(&opp_table_lock);
-
- opp_table = _find_opp_table(dev);
- if (WARN_ON(IS_ERR(opp_table))) {
- ret = PTR_ERR(opp_table);
- mutex_unlock(&opp_table_lock);
- goto free_table;
- }
-
- opp_table->np = opp_np;
- opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
-
- mutex_unlock(&opp_table_lock);
-
- return 0;
-
-free_table:
- dev_pm_opp_of_remove_table(dev);
-
- return ret;
-}
-
-/* Initializes OPP tables based on old-deprecated bindings */
-static int _of_add_opp_table_v1(struct device *dev)
-{
- const struct property *prop;
- const __be32 *val;
- int nr;
-
- prop = of_find_property(dev->of_node, "operating-points", NULL);
- if (!prop)
- return -ENODEV;
- if (!prop->value)
- return -ENODATA;
-
- /*
- * Each OPP is a set of tuples consisting of frequency and
- * voltage like <freq-kHz vol-uV>.
- */
- nr = prop->length / sizeof(u32);
- if (nr % 2) {
- dev_err(dev, "%s: Invalid OPP table\n", __func__);
- return -EINVAL;
- }
-
- val = prop->value;
- while (nr) {
- unsigned long freq = be32_to_cpup(val++) * 1000;
- unsigned long volt = be32_to_cpup(val++);
-
- if (_opp_add_v1(dev, freq, volt, false))
- dev_warn(dev, "%s: Failed to add OPP %ld\n",
- __func__, freq);
- nr -= 2;
- }
-
- return 0;
-}
/**
- * dev_pm_opp_of_add_table() - Initialize opp table from device tree
+ * dev_pm_opp_remove_table() - Free all OPPs associated with the device
* @dev: device pointer used to lookup OPP table.
*
- * Register the initial OPP table with the OPP library for given device.
+ * Free both the OPPs created using static entries present in DT and the
+ * dynamically added entries.
*
* Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
- *
- * Return:
- * 0 On success OR
- * Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST Freq are same and volt are different OR
- * Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM Memory allocation failure
- * -ENODEV when 'operating-points' property is not found or is invalid data
- * in device node.
- * -ENODATA when empty 'operating-points' property is found
- * -EINVAL when invalid entries are found in opp-v2 table
*/
-int dev_pm_opp_of_add_table(struct device *dev)
+void dev_pm_opp_remove_table(struct device *dev)
{
- struct device_node *opp_np;
- int ret;
-
- /*
- * OPPs have two version of bindings now. The older one is deprecated,
- * try for the new binding first.
- */
- opp_np = _of_get_opp_desc_node(dev);
- if (!opp_np) {
- /*
- * Try old-deprecated bindings for backward compatibility with
- * older dtbs.
- */
- return _of_add_opp_table_v1(dev);
- }
-
- ret = _of_add_opp_table_v2(dev, opp_np);
- of_node_put(opp_np);
-
- return ret;
+ _dev_pm_opp_remove_table(dev, true);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
-#endif
+EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
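
A sketch of the new removal helper paired with dynamically registered OPPs; the frequencies and voltages are made up:

#include <linux/pm_opp.h>

static void demo_opp_setup(struct device *dev)
{
	dev_pm_opp_add(dev, 1000000000, 975000);	/* 1.0 GHz @ 0.975 V */
	dev_pm_opp_add(dev, 1200000000, 1075000);	/* 1.2 GHz @ 1.075 V */
}

static void demo_opp_teardown(struct device *dev)
{
	/* Drops static and dynamic entries alike */
	dev_pm_opp_remove_table(dev);
}
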
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index ba2bdbd932ef..83d6e7ba1a34 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -18,7 +18,6 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
-#include <linux/of.h>
#include <linux/slab.h>
#include "opp.h"
@@ -119,8 +118,66 @@ void dev_pm_opp_free_cpufreq_table(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
-/* Required only for V1 bindings, as v2 can manage it from DT itself */
-int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
+{
+ struct device *cpu_dev;
+ int cpu;
+
+ WARN_ON(cpumask_empty(cpumask));
+
+ for_each_cpu(cpu, cpumask) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ cpu);
+ continue;
+ }
+
+ if (of)
+ dev_pm_opp_of_remove_table(cpu_dev);
+ else
+ dev_pm_opp_remove_table(cpu_dev);
+ }
+}
+
+/**
+ * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask
+ * @cpumask: cpumask for which OPP table needs to be removed
+ *
+ * This removes the OPP tables for CPUs present in the @cpumask.
+ * This should be used to remove all the OPP entries associated with
+ * the cpus in @cpumask.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
+{
+ _dev_pm_opp_cpumask_remove_table(cpumask, false);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
+
+/**
+ * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by few CPUs
+ * @cpu_dev: CPU device for which we do this operation
+ * @cpumask: cpumask of the CPUs which share the OPP table with @cpu_dev
+ *
+ * This marks the OPP table of @cpu_dev as shared by the CPUs present in
+ * @cpumask.
+ *
+ * Returns -ENODEV if OPP table isn't already present.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
struct opp_device *opp_dev;
struct opp_table *opp_table;
@@ -131,7 +188,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
opp_table = _find_opp_table(cpu_dev);
if (IS_ERR(opp_table)) {
- ret = -EINVAL;
+ ret = PTR_ERR(opp_table);
goto unlock;
}
@@ -152,6 +209,9 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
__func__, cpu);
continue;
}
+
+ /* Mark opp-table as multiple CPUs are sharing it now */
+ opp_table->shared_opp = true;
}
unlock:
mutex_unlock(&opp_table_lock);
@@ -160,112 +220,47 @@ unlock:
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
-#ifdef CONFIG_OF
-void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
-{
- struct device *cpu_dev;
- int cpu;
-
- WARN_ON(cpumask_empty(cpumask));
-
- for_each_cpu(cpu, cpumask) {
- cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- cpu);
- continue;
- }
-
- dev_pm_opp_of_remove_table(cpu_dev);
- }
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
-
-int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
-{
- struct device *cpu_dev;
- int cpu, ret = 0;
-
- WARN_ON(cpumask_empty(cpumask));
-
- for_each_cpu(cpu, cpumask) {
- cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- cpu);
- continue;
- }
-
- ret = dev_pm_opp_of_add_table(cpu_dev);
- if (ret) {
- pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
- __func__, cpu, ret);
-
- /* Free all other OPPs */
- dev_pm_opp_of_cpumask_remove_table(cpumask);
- break;
- }
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
-
-/*
- * Works only for OPP v2 bindings.
+/**
+ * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev
+ * @cpu_dev: CPU device for which we do this operation
+ * @cpumask: cpumask to update with information of sharing CPUs
+ *
+ * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
*
- * Returns -ENOENT if operating-points-v2 bindings aren't supported.
+ * Returns -ENODEV if OPP table isn't already present.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
*/
-int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
- struct device_node *np, *tmp_np;
- struct device *tcpu_dev;
- int cpu, ret = 0;
-
- /* Get OPP descriptor node */
- np = _of_get_opp_desc_node(cpu_dev);
- if (!np) {
- dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
- return -ENOENT;
- }
-
- cpumask_set_cpu(cpu_dev->id, cpumask);
-
- /* OPPs are shared ? */
- if (!of_property_read_bool(np, "opp-shared"))
- goto put_cpu_node;
-
- for_each_possible_cpu(cpu) {
- if (cpu == cpu_dev->id)
- continue;
+ struct opp_device *opp_dev;
+ struct opp_table *opp_table;
+ int ret = 0;
- tcpu_dev = get_cpu_device(cpu);
- if (!tcpu_dev) {
- dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
- __func__, cpu);
- ret = -ENODEV;
- goto put_cpu_node;
- }
+ mutex_lock(&opp_table_lock);
- /* Get OPP descriptor node */
- tmp_np = _of_get_opp_desc_node(tcpu_dev);
- if (!tmp_np) {
- dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
- __func__);
- ret = -ENOENT;
- goto put_cpu_node;
- }
+ opp_table = _find_opp_table(cpu_dev);
+ if (IS_ERR(opp_table)) {
+ ret = PTR_ERR(opp_table);
+ goto unlock;
+ }
- /* CPUs are sharing opp node */
- if (np == tmp_np)
- cpumask_set_cpu(cpu, cpumask);
+ cpumask_clear(cpumask);
- of_node_put(tmp_np);
+ if (opp_table->shared_opp) {
+ list_for_each_entry(opp_dev, &opp_table->dev_list, node)
+ cpumask_set_cpu(opp_dev->dev->id, cpumask);
+ } else {
+ cpumask_set_cpu(cpu_dev->id, cpumask);
}
-put_cpu_node:
- of_node_put(np);
+unlock:
+ mutex_unlock(&opp_table_lock);
+
return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
-#endif
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
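
A sketch of how a cpufreq-style driver might use the new pair; cpu_dev and the cpumask are assumed to come from the driver's init path:

static int demo_mark_sharing(struct device *cpu_dev, struct cpumask *cpus)
{
	int ret;

	/* Tell the OPP core that every CPU in @cpus shares cpu_dev's table */
	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpus);
	if (ret)
		return ret;

	/* The reverse query refills @cpus from the OPP core's own bookkeeping */
	return dev_pm_opp_get_sharing_cpus(cpu_dev, cpus);
}
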
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
new file mode 100644
index 000000000000..94d2010558e3
--- /dev/null
+++ b/drivers/base/power/opp/of.c
@@ -0,0 +1,591 @@
+/*
+ * Generic OPP OF helpers
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ * Nishanth Menon
+ * Romit Dasgupta
+ * Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/export.h>
+
+#include "opp.h"
+
+static struct opp_table *_managed_opp(const struct device_node *np)
+{
+ struct opp_table *opp_table;
+
+ list_for_each_entry_rcu(opp_table, &opp_tables, node) {
+ if (opp_table->np == np) {
+ /*
+ * Multiple devices can point to the same OPP table and
+ * so will have same node-pointer, np.
+ *
+ * But the OPPs will be considered as shared only if the
+ * OPP table contains an "opp-shared" property.
+ */
+ return opp_table->shared_opp ? opp_table : NULL;
+ }
+ }
+
+ return NULL;
+}
+
+void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
+{
+ struct device_node *np;
+
+ /*
+ * Only required for backward compatibility with v1 bindings, but isn't
+ * harmful for other cases. And so we do it unconditionally.
+ */
+ np = of_node_get(dev->of_node);
+ if (np) {
+ u32 val;
+
+ if (!of_property_read_u32(np, "clock-latency", &val))
+ opp_table->clock_latency_ns_max = val;
+ of_property_read_u32(np, "voltage-tolerance",
+ &opp_table->voltage_tolerance_v1);
+ of_node_put(np);
+ }
+}
+
+static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
+ struct device_node *np)
+{
+ unsigned int count = opp_table->supported_hw_count;
+ u32 version;
+ int ret;
+
+ if (!opp_table->supported_hw)
+ return true;
+
+ while (count--) {
+ ret = of_property_read_u32_index(np, "opp-supported-hw", count,
+ &version);
+ if (ret) {
+ dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
+ __func__, count, ret);
+ return false;
+ }
+
+ /* Both of these are bitwise masks of the versions */
+ if (!(version & opp_table->supported_hw[count]))
+ return false;
+ }
+
+ return true;
+}
+
+/* TODO: Support multiple regulators */
+static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
+ struct opp_table *opp_table)
+{
+ u32 microvolt[3] = {0};
+ u32 val;
+ int count, ret;
+ struct property *prop = NULL;
+ char name[NAME_MAX];
+
+ /* Search for "opp-microvolt-<name>" */
+ if (opp_table->prop_name) {
+ snprintf(name, sizeof(name), "opp-microvolt-%s",
+ opp_table->prop_name);
+ prop = of_find_property(opp->np, name, NULL);
+ }
+
+ if (!prop) {
+ /* Search for "opp-microvolt" */
+ sprintf(name, "opp-microvolt");
+ prop = of_find_property(opp->np, name, NULL);
+
+ /* Missing property isn't a problem, but an invalid entry is */
+ if (!prop)
+ return 0;
+ }
+
+ count = of_property_count_u32_elems(opp->np, name);
+ if (count < 0) {
+ dev_err(dev, "%s: Invalid %s property (%d)\n",
+ __func__, name, count);
+ return count;
+ }
+
+ /* There can be one or three elements here */
+ if (count != 1 && count != 3) {
+ dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
+ __func__, name, count);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_array(opp->np, name, microvolt, count);
+ if (ret) {
+ dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
+ return -EINVAL;
+ }
+
+ opp->u_volt = microvolt[0];
+
+ if (count == 1) {
+ opp->u_volt_min = opp->u_volt;
+ opp->u_volt_max = opp->u_volt;
+ } else {
+ opp->u_volt_min = microvolt[1];
+ opp->u_volt_max = microvolt[2];
+ }
+
+ /* Search for "opp-microamp-<name>" */
+ prop = NULL;
+ if (opp_table->prop_name) {
+ snprintf(name, sizeof(name), "opp-microamp-%s",
+ opp_table->prop_name);
+ prop = of_find_property(opp->np, name, NULL);
+ }
+
+ if (!prop) {
+ /* Search for "opp-microamp" */
+ sprintf(name, "opp-microamp");
+ prop = of_find_property(opp->np, name, NULL);
+ }
+
+ if (prop && !of_property_read_u32(opp->np, name, &val))
+ opp->u_amp = val;
+
+ return 0;
+}
+
+/**
+ * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
+ * entries
+ * @dev: device pointer used to lookup OPP table.
+ *
+ * Free OPPs created using static entries present in DT.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_of_remove_table(struct device *dev)
+{
+ _dev_pm_opp_remove_table(dev, false);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
+
+/* Returns opp descriptor node for a device, caller must do of_node_put() */
+struct device_node *_of_get_opp_desc_node(struct device *dev)
+{
+ /*
+ * TODO: Support for multiple OPP tables.
+ *
+ * There should be only ONE phandle present in "operating-points-v2"
+ * property.
+ */
+
+ return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
+}
+
+/**
+ * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
+ * @dev: device for which we do this operation
+ * @np: device node
+ *
+ * This function adds an opp definition to the opp table and returns status. The
+ * opp can be controlled using dev_pm_opp_enable/disable functions and may be
+ * removed by dev_pm_opp_remove.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST Freq are same and volt are different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ * -EINVAL Failed parsing the OPP node
+ */
+static int _opp_add_static_v2(struct device *dev, struct device_node *np)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *new_opp;
+ u64 rate;
+ u32 val;
+ int ret;
+
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
+
+ new_opp = _allocate_opp(dev, &opp_table);
+ if (!new_opp) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = of_property_read_u64(np, "opp-hz", &rate);
+ if (ret < 0) {
+ dev_err(dev, "%s: opp-hz not found\n", __func__);
+ goto free_opp;
+ }
+
+ /* Check if the OPP supports hardware's hierarchy of versions or not */
+ if (!_opp_is_supported(dev, opp_table, np)) {
+ dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
+ goto free_opp;
+ }
+
+ /*
+ * Rate is defined as an unsigned long in clk API, and so casting
+ * explicitly to its type. Must be fixed once rate is 64 bit
+ * guaranteed in clk API.
+ */
+ new_opp->rate = (unsigned long)rate;
+ new_opp->turbo = of_property_read_bool(np, "turbo-mode");
+
+ new_opp->np = np;
+ new_opp->dynamic = false;
+ new_opp->available = true;
+
+ if (!of_property_read_u32(np, "clock-latency-ns", &val))
+ new_opp->clock_latency_ns = val;
+
+ ret = opp_parse_supplies(new_opp, dev, opp_table);
+ if (ret)
+ goto free_opp;
+
+ ret = _opp_add(dev, new_opp, opp_table);
+ if (ret)
+ goto free_opp;
+
+ /* OPP to select on device suspend */
+ if (of_property_read_bool(np, "opp-suspend")) {
+ if (opp_table->suspend_opp) {
+ dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
+ __func__, opp_table->suspend_opp->rate,
+ new_opp->rate);
+ } else {
+ new_opp->suspend = true;
+ opp_table->suspend_opp = new_opp;
+ }
+ }
+
+ if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
+ opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
+
+ mutex_unlock(&opp_table_lock);
+
+ pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
+ __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
+ new_opp->u_volt_min, new_opp->u_volt_max,
+ new_opp->clock_latency_ns);
+
+ /*
+ * Notify the changes in the availability of the operable
+ * frequency/voltage list.
+ */
+ srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
+ return 0;
+
+free_opp:
+ _opp_remove(opp_table, new_opp, false);
+unlock:
+ mutex_unlock(&opp_table_lock);
+ return ret;
+}
+
+/* Initializes OPP tables based on new bindings */
+static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
+{
+ struct device_node *np;
+ struct opp_table *opp_table;
+ int ret = 0, count = 0;
+
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _managed_opp(opp_np);
+ if (opp_table) {
+ /* OPPs are already managed */
+ if (!_add_opp_dev(dev, opp_table))
+ ret = -ENOMEM;
+ mutex_unlock(&opp_table_lock);
+ return ret;
+ }
+ mutex_unlock(&opp_table_lock);
+
+ /* We have opp-table node now, iterate over it and add OPPs */
+ for_each_available_child_of_node(opp_np, np) {
+ count++;
+
+ ret = _opp_add_static_v2(dev, np);
+ if (ret) {
+ dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
+ ret);
+ goto free_table;
+ }
+ }
+
+ /* There should be one or more OPPs defined */
+ if (WARN_ON(!count))
+ return -ENOENT;
+
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _find_opp_table(dev);
+ if (WARN_ON(IS_ERR(opp_table))) {
+ ret = PTR_ERR(opp_table);
+ mutex_unlock(&opp_table_lock);
+ goto free_table;
+ }
+
+ opp_table->np = opp_np;
+ opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+
+ mutex_unlock(&opp_table_lock);
+
+ return 0;
+
+free_table:
+ dev_pm_opp_of_remove_table(dev);
+
+ return ret;
+}
+
+/* Initializes OPP tables based on old-deprecated bindings */
+static int _of_add_opp_table_v1(struct device *dev)
+{
+ const struct property *prop;
+ const __be32 *val;
+ int nr;
+
+ prop = of_find_property(dev->of_node, "operating-points", NULL);
+ if (!prop)
+ return -ENODEV;
+ if (!prop->value)
+ return -ENODATA;
+
+ /*
+ * Each OPP is a set of tuples consisting of frequency and
+ * voltage like <freq-kHz vol-uV>.
+ */
+ nr = prop->length / sizeof(u32);
+ if (nr % 2) {
+ dev_err(dev, "%s: Invalid OPP table\n", __func__);
+ return -EINVAL;
+ }
+
+ val = prop->value;
+ while (nr) {
+ unsigned long freq = be32_to_cpup(val++) * 1000;
+ unsigned long volt = be32_to_cpup(val++);
+
+ if (_opp_add_v1(dev, freq, volt, false))
+ dev_warn(dev, "%s: Failed to add OPP %ld\n",
+ __func__, freq);
+ nr -= 2;
+ }
+
+ return 0;
+}
+
+/**
+ * dev_pm_opp_of_add_table() - Initialize opp table from device tree
+ * @dev: device pointer used to lookup OPP table.
+ *
+ * Register the initial OPP table with the OPP library for given device.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST Freq are same and volt are different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ * -ENODEV when 'operating-points' property is not found or is invalid data
+ * in device node.
+ * -ENODATA when empty 'operating-points' property is found
+ * -EINVAL when invalid entries are found in opp-v2 table
+ */
+int dev_pm_opp_of_add_table(struct device *dev)
+{
+ struct device_node *opp_np;
+ int ret;
+
+ /*
+ * OPPs have two versions of bindings now. The older one is deprecated,
+ * try for the new binding first.
+ */
+ opp_np = _of_get_opp_desc_node(dev);
+ if (!opp_np) {
+ /*
+ * Try old-deprecated bindings for backward compatibility with
+ * older dtbs.
+ */
+ return _of_add_opp_table_v1(dev);
+ }
+
+ ret = _of_add_opp_table_v2(dev, opp_np);
+ of_node_put(opp_np);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
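
A probe/remove sketch for the DT helpers; the driver names are hypothetical and error handling is trimmed:

static int demo_probe(struct platform_device *pdev)
{
	int ret;

	/* Parses operating-points-v2 if present, else the legacy binding */
	ret = dev_pm_opp_of_add_table(&pdev->dev);
	if (ret)
		return ret;

	/* ... pick OPPs with dev_pm_opp_find_freq_ceil() and friends ... */
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	dev_pm_opp_of_remove_table(&pdev->dev);	/* frees only the static entries */
	return 0;
}
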
+
+/* CPU device specific helpers */
+
+/**
+ * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
+ * @cpumask: cpumask for which OPP table needs to be removed
+ *
+ * This removes the OPP tables for CPUs present in the @cpumask.
+ * This should be used only to remove static entries created from DT.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
+{
+ _dev_pm_opp_cpumask_remove_table(cpumask, true);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
+
+/**
+ * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
+ * @cpumask: cpumask for which OPP table needs to be added.
+ *
+ * This adds the OPP tables for CPUs present in the @cpumask.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
+{
+ struct device *cpu_dev;
+ int cpu, ret = 0;
+
+ WARN_ON(cpumask_empty(cpumask));
+
+ for_each_cpu(cpu, cpumask) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ cpu);
+ continue;
+ }
+
+ ret = dev_pm_opp_of_add_table(cpu_dev);
+ if (ret) {
+ pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
+ __func__, cpu, ret);
+
+ /* Free all other OPPs */
+ dev_pm_opp_of_cpumask_remove_table(cpumask);
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
+
+/**
+ * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
+ * @cpu_dev using operating-points-v2
+ * bindings.
+ *
+ * @cpu_dev: CPU device for which we do this operation
+ * @cpumask: cpumask to update with information of sharing CPUs
+ *
+ * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
+ *
+ * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
+ struct cpumask *cpumask)
+{
+ struct device_node *np, *tmp_np;
+ struct device *tcpu_dev;
+ int cpu, ret = 0;
+
+ /* Get OPP descriptor node */
+ np = _of_get_opp_desc_node(cpu_dev);
+ if (!np) {
+ dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
+ return -ENOENT;
+ }
+
+ cpumask_set_cpu(cpu_dev->id, cpumask);
+
+ /* OPPs are shared ? */
+ if (!of_property_read_bool(np, "opp-shared"))
+ goto put_cpu_node;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ tcpu_dev = get_cpu_device(cpu);
+ if (!tcpu_dev) {
+ dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+ __func__, cpu);
+ ret = -ENODEV;
+ goto put_cpu_node;
+ }
+
+ /* Get OPP descriptor node */
+ tmp_np = _of_get_opp_desc_node(tcpu_dev);
+ if (!tmp_np) {
+ dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
+ __func__);
+ ret = -ENOENT;
+ goto put_cpu_node;
+ }
+
+ /* CPUs are sharing opp node */
+ if (np == tmp_np)
+ cpumask_set_cpu(cpu, cpumask);
+
+ of_node_put(tmp_np);
+ }
+
+put_cpu_node:
+ of_node_put(np);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
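
Putting the CPU helpers together, roughly as a cpufreq driver would during initialisation; cpu_dev and the cpumask are assumed to be supplied by the caller:

static int demo_cpu_opp_init(struct device *cpu_dev, struct cpumask *cpus)
{
	int ret;

	/* Seed the mask so the v1 fallback still covers at least this CPU */
	cpumask_set_cpu(cpu_dev->id, cpus);

	/* v2 bindings only: grow @cpus with every CPU marked opp-shared */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, cpus);
	if (ret && ret != -ENOENT)
		return ret;

	/* Create the OPP tables for every CPU in the (possibly shared) mask */
	return dev_pm_opp_of_cpumask_add_table(cpus);
}
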
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index f67f806fcf3a..20f3be22e060 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -28,6 +28,8 @@ struct regulator;
/* Lock to allow exclusive modification to the device and opp lists */
extern struct mutex opp_table_lock;
+extern struct list_head opp_tables;
+
/*
* Internal data structure organization with the OPP layer library is as
* follows:
@@ -183,6 +185,18 @@ struct opp_table {
struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
struct device_node *_of_get_opp_desc_node(struct device *dev);
+void _dev_pm_opp_remove_table(struct device *dev, bool remove_all);
+struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table);
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
+void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp, bool notify);
+int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, bool dynamic);
+void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of);
+
+#ifdef CONFIG_OF
+void _of_init_opp_table(struct opp_table *opp_table, struct device *dev);
+#else
+static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) {}
+#endif
#ifdef CONFIG_DEBUG_FS
void opp_debug_remove_one(struct dev_pm_opp *opp);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 4c7055009bd6..b74690418504 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1506,11 +1506,16 @@ int pm_runtime_force_resume(struct device *dev)
goto out;
}
- ret = callback(dev);
+ ret = pm_runtime_set_active(dev);
if (ret)
goto out;
- pm_runtime_set_active(dev);
+ ret = callback(dev);
+ if (ret) {
+ pm_runtime_set_suspended(dev);
+ goto out;
+ }
+
pm_runtime_mark_last_busy(dev);
out:
pm_runtime_enable(dev);
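
For reference, the usual way a driver wires these helpers up, which is why the new ordering (status first, callback second, rollback on failure) matters; the runtime callbacks below are trivial placeholders:

static int my_runtime_suspend(struct device *dev) { return 0; }
static int my_runtime_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops my_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
};
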
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 7f692accdc90..f38c21de29b7 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -19,6 +19,11 @@
#include <linux/etherdevice.h>
#include <linux/phy.h>
+struct property_set {
+ struct fwnode_handle fwnode;
+ struct property_entry *properties;
+};
+
static inline bool is_pset_node(struct fwnode_handle *fwnode)
{
return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA;
@@ -801,14 +806,14 @@ static struct property_set *pset_copy_set(const struct property_set *pset)
}
/**
- * device_remove_property_set - Remove properties from a device object.
+ * device_remove_properties - Remove properties from a device object.
* @dev: Device whose properties to remove.
*
* The function removes properties previously associated to the device
- * secondary firmware node with device_add_property_set(). Memory allocated
+ * secondary firmware node with device_add_properties(). Memory allocated
* to the properties will also be released.
*/
-void device_remove_property_set(struct device *dev)
+void device_remove_properties(struct device *dev)
{
struct fwnode_handle *fwnode;
@@ -831,24 +836,27 @@ void device_remove_property_set(struct device *dev)
}
}
}
-EXPORT_SYMBOL_GPL(device_remove_property_set);
+EXPORT_SYMBOL_GPL(device_remove_properties);
/**
- * device_add_property_set - Add a collection of properties to a device object.
+ * device_add_properties - Add a collection of properties to a device object.
* @dev: Device to add properties to.
- * @pset: Collection of properties to add.
+ * @properties: Collection of properties to add.
*
- * Associate a collection of device properties represented by @pset with @dev
- * as its secondary firmware node. The function takes a copy of @pset.
+ * Associate a collection of device properties represented by @properties with
+ * @dev as its secondary firmware node. The function takes a copy of
+ * @properties.
*/
-int device_add_property_set(struct device *dev, const struct property_set *pset)
+int device_add_properties(struct device *dev, struct property_entry *properties)
{
- struct property_set *p;
+ struct property_set *p, pset;
- if (!pset)
+ if (!properties)
return -EINVAL;
- p = pset_copy_set(pset);
+ pset.properties = properties;
+
+ p = pset_copy_set(&pset);
if (IS_ERR(p))
return PTR_ERR(p);
@@ -856,7 +864,7 @@ int device_add_property_set(struct device *dev, const struct property_set *pset)
set_secondary_fwnode(dev, &p->fwnode);
return 0;
}
-EXPORT_SYMBOL_GPL(device_add_property_set);
+EXPORT_SYMBOL_GPL(device_add_properties);
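
A sketch from the device side; because the entries are deep-copied, the caller's array does not need to outlive the call. The property names are made up:

static struct property_entry demo_dev_props[] = {
	PROPERTY_ENTRY_BOOL("wakeup-source"),
	PROPERTY_ENTRY_U32("poll-interval-ms", 250),
	{ }
};

static int demo_attach_props(struct device *dev)
{
	return device_add_properties(dev, demo_dev_props);
}

static void demo_detach_props(struct device *dev)
{
	device_remove_properties(dev);	/* also frees the copied entries */
}
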
/**
* device_get_next_child_node - Return the next child node handle for a device
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index 3ee72550b1e3..4d2e50bfc726 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -27,7 +27,7 @@ static int regcache_flat_init(struct regmap *map)
int i;
unsigned int *cache;
- if (!map || map->reg_stride_order < 0)
+ if (!map || map->reg_stride_order < 0 || !map->max_register)
return -EINVAL;
map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 4170b7d95276..df7ff7290821 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -529,7 +529,7 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty);
* regcache_cache_bypass: Put a register map into cache bypass mode
*
* @map: map to configure
- * @cache_bypass: flag if changes should not be written to the hardware
+ * @cache_bypass: flag if changes should not be written to the cache
*
* When a register map is marked with the cache bypass option, writes
* to the register map API will only update the hardware and not the
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index fa209773d494..2ba1494b2799 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2761,7 +2761,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
q->backing_dev_info.congested_data = device;
blk_queue_make_request(q, drbd_make_request);
- blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+ blk_queue_write_cache(q, true, true);
/* Setting the max_hw_sectors to an odd value of 8kibyte here
This triggers a max_bio_size message upon first attach or connect */
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
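
These blk_queue_flush() to blk_queue_write_cache() conversions all follow the same mapping; a side-by-side sketch, with q standing for any request queue:

#include <linux/blkdev.h>

static void demo_setup_flush(struct request_queue *q)
{
	/* before: blk_queue_flush(q, REQ_FLUSH | REQ_FUA); */

	/* after: first bool = volatile write cache present, second = FUA supported */
	blk_queue_write_cache(q, true, true);

	/* a flush-only device (no FUA) would instead pass (q, true, false) */
}
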
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 80cf8add46ff..1fa8cc235977 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -943,7 +943,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
- blk_queue_flush(lo->lo_queue, REQ_FLUSH);
+ blk_queue_write_cache(lo->lo_queue, true, false);
loop_update_dio(lo);
set_capacity(lo->lo_disk, size);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 25824c1697c5..6053e4659fa2 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3000,14 +3000,14 @@ restart_eh:
"Completion workers still active!");
spin_lock(dd->queue->queue_lock);
- blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ blk_mq_tagset_busy_iter(&dd->tags,
mtip_queue_cmd, dd);
spin_unlock(dd->queue->queue_lock);
set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
if (mtip_device_reset(dd))
- blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ blk_mq_tagset_busy_iter(&dd->tags,
mtip_abort_cmd, dd);
clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
@@ -4023,12 +4023,6 @@ skip_create_disk:
blk_queue_io_min(dd->queue, 4096);
blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask);
- /*
- * write back cache is not supported in the device. FUA depends on
- * write back cache support, hence setting flush support to zero.
- */
- blk_queue_flush(dd->queue, 0);
-
/* Signal trim support */
if (dd->trim_supp == true) {
set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags);
@@ -4174,7 +4168,7 @@ static int mtip_block_remove(struct driver_data *dd)
blk_mq_freeze_queue_start(dd->queue);
blk_mq_stop_hw_queues(dd->queue);
- blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
+ blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
/*
* Delete our gendisk structure. This also removes the device
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 08afbc7a2bb8..31e73a7a40f2 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -693,9 +693,9 @@ static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
if (nbd->flags & NBD_FLAG_SEND_TRIM)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
if (nbd->flags & NBD_FLAG_SEND_FLUSH)
- blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
+ blk_queue_write_cache(nbd->disk->queue, true, false);
else
- blk_queue_flush(nbd->disk->queue, 0);
+ blk_queue_write_cache(nbd->disk->queue, false, false);
}
static int nbd_dev_dbg_init(struct nbd_device *nbd);
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 1b709a4e3b5e..c2854a2bfdb0 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -437,7 +437,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
blk_queue_stack_limits(q, osd_request_queue(osdev->osd));
blk_queue_prep_rq(q, blk_queue_start_tag);
- blk_queue_flush(q, REQ_FLUSH);
+ blk_queue_write_cache(q, true, false);
disk->queue = q;
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index c120d70d3fb3..4b7e405830d7 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -468,7 +468,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
blk_queue_dma_alignment(queue, dev->blk_size-1);
blk_queue_logical_block_size(queue, dev->blk_size);
- blk_queue_flush(queue, REQ_FLUSH);
+ blk_queue_write_cache(queue, true, false);
blk_queue_max_segments(queue, -1);
blk_queue_max_segment_size(queue, dev->bounce_size);
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 586f9168ffa4..910e065918af 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -133,7 +133,6 @@ MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
#define INQ_STD_NBYTES 36
-#define SKD_DISCARD_CDB_LENGTH 24
enum skd_drvr_state {
SKD_DRVR_STATE_LOAD,
@@ -212,7 +211,6 @@ struct skd_request_context {
struct request *req;
u8 flush_cmd;
- u8 discard_page;
u32 timeout_stamp;
u8 sg_data_dir;
@@ -230,7 +228,6 @@ struct skd_request_context {
};
#define SKD_DATA_DIR_HOST_TO_CARD 1
#define SKD_DATA_DIR_CARD_TO_HOST 2
-#define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. */
struct skd_special_context {
struct skd_request_context req;
@@ -540,31 +537,6 @@ skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
scsi_req->cdb[9] = 0;
}
-static void
-skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
- struct skd_request_context *skreq,
- struct page *page,
- u32 lba, u32 count)
-{
- char *buf;
- unsigned long len;
- struct request *req;
-
- buf = page_address(page);
- len = SKD_DISCARD_CDB_LENGTH;
-
- scsi_req->cdb[0] = UNMAP;
- scsi_req->cdb[8] = len;
-
- put_unaligned_be16(6 + 16, &buf[0]);
- put_unaligned_be16(16, &buf[2]);
- put_unaligned_be64(lba, &buf[8]);
- put_unaligned_be32(count, &buf[16]);
-
- req = skreq->req;
- blk_add_request_payload(req, page, len);
-}
-
static void skd_request_fn_not_online(struct request_queue *q);
static void skd_request_fn(struct request_queue *q)
@@ -575,7 +547,6 @@ static void skd_request_fn(struct request_queue *q)
struct skd_request_context *skreq;
struct request *req = NULL;
struct skd_scsi_request *scsi_req;
- struct page *page;
unsigned long io_flags;
int error;
u32 lba;
@@ -669,7 +640,6 @@ static void skd_request_fn(struct request_queue *q)
skreq->flush_cmd = 0;
skreq->n_sg = 0;
skreq->sg_byte_count = 0;
- skreq->discard_page = 0;
/*
* OK to now dequeue request from q.
@@ -735,18 +705,7 @@ static void skd_request_fn(struct request_queue *q)
else
skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
- if (io_flags & REQ_DISCARD) {
- page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
- if (!page) {
- pr_err("request_fn:Page allocation failed.\n");
- skd_end_request(skdev, skreq, -ENOMEM);
- break;
- }
- skreq->discard_page = 1;
- req->completion_data = page;
- skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
-
- } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
+ if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
skd_prep_zerosize_flush_cdb(scsi_req, skreq);
SKD_ASSERT(skreq->flush_cmd == 1);
@@ -851,16 +810,6 @@ skip_sg:
static void skd_end_request(struct skd_device *skdev,
struct skd_request_context *skreq, int error)
{
- struct request *req = skreq->req;
- unsigned int io_flags = req->cmd_flags;
-
- if ((io_flags & REQ_DISCARD) &&
- (skreq->discard_page == 1)) {
- pr_debug("%s:%s:%d, free the page!",
- skdev->name, __func__, __LINE__);
- __free_page(req->completion_data);
- }
-
if (unlikely(error)) {
struct request *req = skreq->req;
char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
@@ -4412,19 +4361,13 @@ static int skd_cons_disk(struct skd_device *skdev)
disk->queue = q;
q->queuedata = skdev;
- blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+ blk_queue_write_cache(q, true, true);
blk_queue_max_segments(q, skdev->sgs_per_request);
blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
/* set sysfs optimal_io_size to 8K */
blk_queue_io_opt(q, 8192);
- /* DISCARD Flag initialization. */
- q->limits.discard_granularity = 8192;
- q->limits.discard_alignment = 0;
- blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
- q->limits.discard_zeroes_data = 1;
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 28cff0d23d82..42758b52768c 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -493,11 +493,7 @@ static void virtblk_update_cache_mode(struct virtio_device *vdev)
u8 writeback = virtblk_get_cache_mode(vdev);
struct virtio_blk *vblk = vdev->priv;
- if (writeback)
- blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
- else
- blk_queue_flush(vblk->disk->queue, 0);
-
+ blk_queue_write_cache(vblk->disk->queue, writeback, false);
revalidate_disk(vblk->disk);
}
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 26aa080e243c..3355f1cdd4e5 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -477,7 +477,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
vbd->type |= VDISK_REMOVABLE;
q = bdev_get_queue(bdev);
- if (q && q->flush_flags)
+ if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
vbd->flush_support = true;
if (q && blk_queue_secdiscard(q))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6405b6557792..ca13df854639 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -998,7 +998,8 @@ static const char *flush_info(unsigned int feature_flush)
static void xlvbd_flush(struct blkfront_info *info)
{
- blk_queue_flush(info->rq, info->feature_flush);
+ blk_queue_write_cache(info->rq, info->feature_flush & REQ_FLUSH,
+ info->feature_flush & REQ_FUA);
pr_info("blkfront: %s: %s %s %s %s %s\n",
info->gd->disk_name, flush_info(info->feature_flush),
"persistent grants:", info->feature_persistent ?
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 67ee8b08ab53..ac51149e9777 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -268,19 +268,6 @@ config HW_RANDOM_NOMADIK
If unsure, say Y.
-config HW_RANDOM_PPC4XX
- tristate "PowerPC 4xx generic true random number generator support"
- depends on PPC && 4xx
- default HW_RANDOM
- ---help---
- This driver provides the kernel-side support for the TRNG hardware
- found in the security function of some PowerPC 4xx SoCs.
-
- To compile this driver as a module, choose M here: the
- module will be called ppc4xx-rng.
-
- If unsure, say N.
-
config HW_RANDOM_PSERIES
tristate "pSeries HW Random Number Generator support"
depends on PPC64 && IBMVIO
@@ -309,7 +296,8 @@ config HW_RANDOM_POWERNV
config HW_RANDOM_EXYNOS
tristate "EXYNOS HW random number generator support"
- depends on ARCH_EXYNOS
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on HAS_IOMEM
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
@@ -333,6 +321,19 @@ config HW_RANDOM_TPM
If unsure, say Y.
+config HW_RANDOM_HISI
+ tristate "Hisilicon Random Number Generator support"
+ depends on HW_RANDOM && ARCH_HISI
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Hisilicon Hip04 and Hip05 SoCs.

+
+ To compile this driver as a module, choose M here: the
+ module will be called hisi-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_MSM
tristate "Qualcomm SoCs Random Number Generator support"
depends on HW_RANDOM && ARCH_QCOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index f5a6fa7690e7..63022b49f160 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -22,10 +22,10 @@ obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
-obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
+obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index ada081232528..ed44561ea647 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -2,7 +2,7 @@
* exynos-rng.c - Random Number Generator driver for the exynos
*
* Copyright (C) 2012 Samsung Electronics
- * Jonghwa Lee <jonghwa3.lee@smasung.com>
+ * Jonghwa Lee <jonghwa3.lee@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -77,7 +77,8 @@ static int exynos_init(struct hwrng *rng)
pm_runtime_get_sync(exynos_rng->dev);
ret = exynos_rng_configure(exynos_rng);
- pm_runtime_put_noidle(exynos_rng->dev);
+ pm_runtime_mark_last_busy(exynos_rng->dev);
+ pm_runtime_put_autosuspend(exynos_rng->dev);
return ret;
}
@@ -89,6 +90,7 @@ static int exynos_read(struct hwrng *rng, void *buf,
struct exynos_rng, rng);
u32 *data = buf;
int retry = 100;
+ int ret = 4;
pm_runtime_get_sync(exynos_rng->dev);
@@ -97,23 +99,27 @@ static int exynos_read(struct hwrng *rng, void *buf,
while (!(exynos_rng_readl(exynos_rng,
EXYNOS_PRNG_STATUS_OFFSET) & PRNG_DONE) && --retry)
cpu_relax();
- if (!retry)
- return -ETIMEDOUT;
+ if (!retry) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
exynos_rng_writel(exynos_rng, PRNG_DONE, EXYNOS_PRNG_STATUS_OFFSET);
*data = exynos_rng_readl(exynos_rng, EXYNOS_PRNG_OUT1_OFFSET);
+out:
pm_runtime_mark_last_busy(exynos_rng->dev);
pm_runtime_put_sync_autosuspend(exynos_rng->dev);
- return 4;
+ return ret;
}
static int exynos_rng_probe(struct platform_device *pdev)
{
struct exynos_rng *exynos_rng;
struct resource *res;
+ int ret;
exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
GFP_KERNEL);
@@ -141,7 +147,21 @@ static int exynos_rng_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- return devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
+ ret = devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
+ if (ret) {
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ }
+
+ return ret;
+}
+
+static int exynos_rng_remove(struct platform_device *pdev)
+{
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
}
static int __maybe_unused exynos_rng_runtime_suspend(struct device *dev)
@@ -201,6 +221,7 @@ static struct platform_driver exynos_rng_driver = {
.of_match_table = exynos_rng_dt_match,
},
.probe = exynos_rng_probe,
+ .remove = exynos_rng_remove,
};
module_platform_driver(exynos_rng_driver);
diff --git a/drivers/char/hw_random/hisi-rng.c b/drivers/char/hw_random/hisi-rng.c
new file mode 100644
index 000000000000..40d96572c591
--- /dev/null
+++ b/drivers/char/hw_random/hisi-rng.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2016 HiSilicon Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+
+#define RNG_SEED 0x0
+#define RNG_CTRL 0x4
+ #define RNG_SEED_SEL BIT(2)
+ #define RNG_RING_EN BIT(1)
+ #define RNG_EN BIT(0)
+#define RNG_RAN_NUM 0x10
+#define RNG_PHY_SEED 0x14
+
+#define to_hisi_rng(p) container_of(p, struct hisi_rng, rng)
+
+static int seed_sel;
+module_param(seed_sel, int, S_IRUGO);
+MODULE_PARM_DESC(seed_sel, "Auto reload seed. 0, use LFSR(default); 1, use ring oscillator.");
+
+struct hisi_rng {
+ void __iomem *base;
+ struct hwrng rng;
+};
+
+static int hisi_rng_init(struct hwrng *rng)
+{
+ struct hisi_rng *hrng = to_hisi_rng(rng);
+ int val = RNG_EN;
+ u32 seed;
+
+ /* get a random number as initial seed */
+ get_random_bytes(&seed, sizeof(seed));
+
+ writel_relaxed(seed, hrng->base + RNG_SEED);
+
+ /**
+	 * The seed is reloaded periodically. There are two choices of
+	 * seed: by default the value from the LFSR is used, otherwise
+	 * the seed generated by the ring oscillator is used.
+ */
+ if (seed_sel == 1)
+ val |= RNG_RING_EN | RNG_SEED_SEL;
+
+ writel_relaxed(val, hrng->base + RNG_CTRL);
+ return 0;
+}
+
+static void hisi_rng_cleanup(struct hwrng *rng)
+{
+ struct hisi_rng *hrng = to_hisi_rng(rng);
+
+ writel_relaxed(0, hrng->base + RNG_CTRL);
+}
+
+static int hisi_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct hisi_rng *hrng = to_hisi_rng(rng);
+ u32 *data = buf;
+
+ *data = readl_relaxed(hrng->base + RNG_RAN_NUM);
+ return 4;
+}
+
+static int hisi_rng_probe(struct platform_device *pdev)
+{
+ struct hisi_rng *rng;
+ struct resource *res;
+ int ret;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rng);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rng->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rng->base))
+ return PTR_ERR(rng->base);
+
+ rng->rng.name = pdev->name;
+ rng->rng.init = hisi_rng_init;
+ rng->rng.cleanup = hisi_rng_cleanup;
+ rng->rng.read = hisi_rng_read;
+
+ ret = devm_hwrng_register(&pdev->dev, &rng->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register hwrng\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id hisi_rng_dt_ids[] = {
+ { .compatible = "hisilicon,hip04-rng" },
+ { .compatible = "hisilicon,hip05-rng" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hisi_rng_dt_ids);
+
+static struct platform_driver hisi_rng_driver = {
+ .probe = hisi_rng_probe,
+ .driver = {
+ .name = "hisi-rng",
+ .of_match_table = of_match_ptr(hisi_rng_dt_ids),
+ },
+};
+
+module_platform_driver(hisi_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kefeng Wang <wangkefeng.wang@huawei>");
+MODULE_DESCRIPTION("Hisilicon random number generator driver");
diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c
deleted file mode 100644
index c0db4387d2e2..000000000000
--- a/drivers/char/hw_random/ppc4xx-rng.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Generic PowerPC 44x RNG driver
- *
- * Copyright 2011 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; version 2 of the License.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/hw_random.h>
-#include <linux/delay.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <asm/io.h>
-
-#define PPC4XX_TRNG_DEV_CTRL 0x60080
-
-#define PPC4XX_TRNGE 0x00020000
-#define PPC4XX_TRNG_CTRL 0x0008
-#define PPC4XX_TRNG_CTRL_DALM 0x20
-#define PPC4XX_TRNG_STAT 0x0004
-#define PPC4XX_TRNG_STAT_B 0x1
-#define PPC4XX_TRNG_DATA 0x0000
-
-#define MODULE_NAME "ppc4xx_rng"
-
-static int ppc4xx_rng_data_present(struct hwrng *rng, int wait)
-{
- void __iomem *rng_regs = (void __iomem *) rng->priv;
- int busy, i, present = 0;
-
- for (i = 0; i < 20; i++) {
- busy = (in_le32(rng_regs + PPC4XX_TRNG_STAT) & PPC4XX_TRNG_STAT_B);
- if (!busy || !wait) {
- present = 1;
- break;
- }
- udelay(10);
- }
- return present;
-}
-
-static int ppc4xx_rng_data_read(struct hwrng *rng, u32 *data)
-{
- void __iomem *rng_regs = (void __iomem *) rng->priv;
- *data = in_le32(rng_regs + PPC4XX_TRNG_DATA);
- return 4;
-}
-
-static int ppc4xx_rng_enable(int enable)
-{
- struct device_node *ctrl;
- void __iomem *ctrl_reg;
- int err = 0;
- u32 val;
-
- /* Find the main crypto device node and map it to turn the TRNG on */
- ctrl = of_find_compatible_node(NULL, NULL, "amcc,ppc4xx-crypto");
- if (!ctrl)
- return -ENODEV;
-
- ctrl_reg = of_iomap(ctrl, 0);
- if (!ctrl_reg) {
- err = -ENODEV;
- goto out;
- }
-
- val = in_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL);
-
- if (enable)
- val |= PPC4XX_TRNGE;
- else
- val = val & ~PPC4XX_TRNGE;
-
- out_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL, val);
- iounmap(ctrl_reg);
-
-out:
- of_node_put(ctrl);
-
- return err;
-}
-
-static struct hwrng ppc4xx_rng = {
- .name = MODULE_NAME,
- .data_present = ppc4xx_rng_data_present,
- .data_read = ppc4xx_rng_data_read,
-};
-
-static int ppc4xx_rng_probe(struct platform_device *dev)
-{
- void __iomem *rng_regs;
- int err = 0;
-
- rng_regs = of_iomap(dev->dev.of_node, 0);
- if (!rng_regs)
- return -ENODEV;
-
- err = ppc4xx_rng_enable(1);
- if (err)
- return err;
-
- out_le32(rng_regs + PPC4XX_TRNG_CTRL, PPC4XX_TRNG_CTRL_DALM);
- ppc4xx_rng.priv = (unsigned long) rng_regs;
-
- err = hwrng_register(&ppc4xx_rng);
-
- return err;
-}
-
-static int ppc4xx_rng_remove(struct platform_device *dev)
-{
- void __iomem *rng_regs = (void __iomem *) ppc4xx_rng.priv;
-
- hwrng_unregister(&ppc4xx_rng);
- ppc4xx_rng_enable(0);
- iounmap(rng_regs);
-
- return 0;
-}
-
-static const struct of_device_id ppc4xx_rng_match[] = {
- { .compatible = "ppc4xx-rng", },
- { .compatible = "amcc,ppc460ex-rng", },
- { .compatible = "amcc,ppc440epx-rng", },
- {},
-};
-MODULE_DEVICE_TABLE(of, ppc4xx_rng_match);
-
-static struct platform_driver ppc4xx_rng_driver = {
- .driver = {
- .name = MODULE_NAME,
- .of_match_table = ppc4xx_rng_match,
- },
- .probe = ppc4xx_rng_probe,
- .remove = ppc4xx_rng_remove,
-};
-
-module_platform_driver(ppc4xx_rng_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Josh Boyer <jwboyer@linux.vnet.ibm.com>");
-MODULE_DESCRIPTION("HW RNG driver for PPC 4xx processors");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 1e25b5205724..7b1c412b40a2 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -104,7 +104,7 @@ enum si_intf_state {
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
enum si_type {
- SI_KCS, SI_SMIC, SI_BT
+ SI_KCS, SI_SMIC, SI_BT
};
static const char * const si_to_str[] = { "kcs", "smic", "bt" };
@@ -410,7 +410,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
rv = SI_SM_CALL_WITHOUT_DELAY;
}
- out:
+out:
return rv;
}
@@ -539,7 +539,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
static void handle_flags(struct smi_info *smi_info)
{
- retry:
+retry:
if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
/* Watchdog pre-timeout */
smi_inc_stat(smi_info, watchdog_pretimeouts);
@@ -831,7 +831,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
{
enum si_sm_result si_sm_result;
- restart:
+restart:
/*
* There used to be a loop here that waited a little while
* (around 25us) before giving up. That turned out to be
@@ -944,7 +944,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
smi_info->timer_running = false;
}
- out:
+out:
return si_sm_result;
}
@@ -1190,7 +1190,7 @@ static void smi_timeout(unsigned long data)
timeout = jiffies + SI_TIMEOUT_JIFFIES;
}
- do_mod_timer:
+do_mod_timer:
if (smi_result != SI_SM_IDLE)
smi_mod_timer(smi_info, timeout);
else
@@ -1576,10 +1576,9 @@ static int port_setup(struct smi_info *info)
if (request_region(addr + idx * info->io.regspacing,
info->io.regsize, DEVICE_NAME) == NULL) {
/* Undo allocations */
- while (idx--) {
+ while (idx--)
release_region(addr + idx * info->io.regspacing,
info->io.regsize);
- }
return -EIO;
}
}
@@ -1638,25 +1637,28 @@ static void mem_outq(const struct si_sm_io *io, unsigned int offset,
}
#endif
-static void mem_cleanup(struct smi_info *info)
+static void mem_region_cleanup(struct smi_info *info, int num)
{
unsigned long addr = info->io.addr_data;
- int mapsize;
+ int idx;
+ for (idx = 0; idx < num; idx++)
+ release_mem_region(addr + idx * info->io.regspacing,
+ info->io.regsize);
+}
+
+static void mem_cleanup(struct smi_info *info)
+{
if (info->io.addr) {
iounmap(info->io.addr);
-
- mapsize = ((info->io_size * info->io.regspacing)
- - (info->io.regspacing - info->io.regsize));
-
- release_mem_region(addr, mapsize);
+ mem_region_cleanup(info, info->io_size);
}
}
static int mem_setup(struct smi_info *info)
{
unsigned long addr = info->io.addr_data;
- int mapsize;
+ int mapsize, idx;
if (!addr)
return -ENODEV;
@@ -1693,6 +1695,21 @@ static int mem_setup(struct smi_info *info)
}
/*
+ * Some BIOSes reserve disjoint memory regions in their ACPI
+ * tables. This causes problems when trying to request the
+ * entire region. Therefore we must request each register
+ * separately.
+ */
+ for (idx = 0; idx < info->io_size; idx++) {
+ if (request_mem_region(addr + idx * info->io.regspacing,
+ info->io.regsize, DEVICE_NAME) == NULL) {
+ /* Undo allocations */
+ mem_region_cleanup(info, idx);
+ return -EIO;
+ }
+ }
+
+ /*
* Calculate the total amount of memory to claim. This is an
* unusual looking calculation, but it avoids claiming any
* more memory than it has to. It will claim everything
@@ -1701,13 +1718,9 @@ static int mem_setup(struct smi_info *info)
*/
mapsize = ((info->io_size * info->io.regspacing)
- (info->io.regspacing - info->io.regsize));
-
- if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
- return -EIO;
-
info->io.addr = ioremap(addr, mapsize);
if (info->io.addr == NULL) {
- release_mem_region(addr, mapsize);
+ mem_region_cleanup(info, info->io_size);
return -EIO;
}
return 0;
@@ -1975,7 +1988,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
}
}
rv = len;
- out:
+out:
kfree(str);
return rv;
}
@@ -2945,7 +2958,7 @@ static int try_get_dev_id(struct smi_info *smi_info)
/* Check and record info from the get device id, in case we need it. */
rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
- out:
+out:
kfree(resp);
return rv;
}
@@ -3192,7 +3205,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
else
smi_info->supports_event_msg_buff = true;
- out:
+out:
kfree(resp);
return rv;
}
@@ -3718,10 +3731,10 @@ static int try_smi_init(struct smi_info *new_smi)
return 0;
- out_err_stop_timer:
+out_err_stop_timer:
wait_for_timer_and_thread(new_smi);
- out_err:
+out_err:
new_smi->interrupt_disabled = true;
if (new_smi->intf) {
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 8b3be8b92573..097c86898608 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1870,7 +1870,7 @@ static int try_init_spmi(struct SPMITable *spmi)
return -EIO;
}
- myaddr = spmi->addr.address >> 1;
+ myaddr = spmi->addr.address & 0x7f;
return new_ssif_client(myaddr, NULL, 0, 0, SI_SPMI);
}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index c346be650892..6ff327abc555 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -186,6 +186,12 @@ config CLKSRC_STM32
depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST)
select CLKSRC_MMIO
+config CLKSRC_MPS2
+ bool "Clocksource for MPS2 SoCs" if COMPILE_TEST
+ depends on GENERIC_SCHED_CLOCK
+ select CLKSRC_MMIO
+ select CLKSRC_OF
+
config ARM_ARCH_TIMER
bool
select CLKSRC_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index dc2b8997f6e6..b0a3c96fcd4f 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o
obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
obj-$(CONFIG_CLKSRC_LPC32XX) += time-lpc32xx.o
+obj-$(CONFIG_CLKSRC_MPS2) += mps2-timer.o
obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o
obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index 63345260244d..797505aa2ba4 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -264,6 +264,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
dw_ced->ced.set_state_shutdown = apbt_shutdown;
dw_ced->ced.set_state_periodic = apbt_set_periodic;
dw_ced->ced.set_state_oneshot = apbt_set_oneshot;
+ dw_ced->ced.set_state_oneshot_stopped = apbt_shutdown;
dw_ced->ced.tick_resume = apbt_resume;
dw_ced->ced.set_next_event = apbt_next_event;
dw_ced->ced.irq = dw_ced->timer.irq;
diff --git a/drivers/clocksource/mps2-timer.c b/drivers/clocksource/mps2-timer.c
new file mode 100644
index 000000000000..3d33a5e23dee
--- /dev/null
+++ b/drivers/clocksource/mps2-timer.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Vladimir Murzin <vladimir.murzin@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#define TIMER_CTRL 0x0
+#define TIMER_CTRL_ENABLE BIT(0)
+#define TIMER_CTRL_IE BIT(3)
+
+#define TIMER_VALUE 0x4
+#define TIMER_RELOAD 0x8
+#define TIMER_INT 0xc
+
+struct clockevent_mps2 {
+ void __iomem *reg;
+ u32 clock_count_per_tick;
+ struct clock_event_device clkevt;
+};
+
+static void __iomem *sched_clock_base;
+
+static u64 notrace mps2_sched_read(void)
+{
+ return ~readl_relaxed(sched_clock_base + TIMER_VALUE);
+}
+
+static inline struct clockevent_mps2 *to_mps2_clkevt(struct clock_event_device *c)
+{
+ return container_of(c, struct clockevent_mps2, clkevt);
+}
+
+static void clockevent_mps2_writel(u32 val, struct clock_event_device *c, u32 offset)
+{
+ writel_relaxed(val, to_mps2_clkevt(c)->reg + offset);
+}
+
+static int mps2_timer_shutdown(struct clock_event_device *ce)
+{
+ clockevent_mps2_writel(0, ce, TIMER_RELOAD);
+ clockevent_mps2_writel(0, ce, TIMER_CTRL);
+
+ return 0;
+}
+
+static int mps2_timer_set_next_event(unsigned long next, struct clock_event_device *ce)
+{
+ clockevent_mps2_writel(next, ce, TIMER_VALUE);
+ clockevent_mps2_writel(TIMER_CTRL_IE | TIMER_CTRL_ENABLE, ce, TIMER_CTRL);
+
+ return 0;
+}
+
+static int mps2_timer_set_periodic(struct clock_event_device *ce)
+{
+ u32 clock_count_per_tick = to_mps2_clkevt(ce)->clock_count_per_tick;
+
+ clockevent_mps2_writel(clock_count_per_tick, ce, TIMER_RELOAD);
+ clockevent_mps2_writel(clock_count_per_tick, ce, TIMER_VALUE);
+ clockevent_mps2_writel(TIMER_CTRL_IE | TIMER_CTRL_ENABLE, ce, TIMER_CTRL);
+
+ return 0;
+}
+
+static irqreturn_t mps2_timer_interrupt(int irq, void *dev_id)
+{
+ struct clockevent_mps2 *ce = dev_id;
+ u32 status = readl_relaxed(ce->reg + TIMER_INT);
+
+ if (!status) {
+ pr_warn("spurious interrupt\n");
+ return IRQ_NONE;
+ }
+
+ writel_relaxed(1, ce->reg + TIMER_INT);
+
+ ce->clkevt.event_handler(&ce->clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static int __init mps2_clockevent_init(struct device_node *np)
+{
+ void __iomem *base;
+ struct clk *clk = NULL;
+ struct clockevent_mps2 *ce;
+ u32 rate;
+ int irq, ret;
+ const char *name = "mps2-clkevt";
+
+ ret = of_property_read_u32(np, "clock-frequency", &rate);
+ if (ret) {
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ pr_err("failed to get clock for clockevent: %d\n", ret);
+ goto out;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("failed to enable clock for clockevent: %d\n", ret);
+ goto out_clk_put;
+ }
+
+ rate = clk_get_rate(clk);
+ }
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -EADDRNOTAVAIL;
+ pr_err("failed to map register for clockevent: %d\n", ret);
+ goto out_clk_disable;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ ret = -ENOENT;
+ pr_err("failed to get irq for clockevent: %d\n", ret);
+ goto out_iounmap;
+ }
+
+ ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+ if (!ce) {
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ ce->reg = base;
+ ce->clock_count_per_tick = DIV_ROUND_CLOSEST(rate, HZ);
+ ce->clkevt.irq = irq;
+ ce->clkevt.name = name;
+ ce->clkevt.rating = 200;
+ ce->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ ce->clkevt.cpumask = cpu_possible_mask;
+ ce->clkevt.set_state_shutdown = mps2_timer_shutdown,
+ ce->clkevt.set_state_periodic = mps2_timer_set_periodic,
+ ce->clkevt.set_state_oneshot = mps2_timer_shutdown,
+ ce->clkevt.set_next_event = mps2_timer_set_next_event;
+
+ /* Ensure timer is disabled */
+ writel_relaxed(0, base + TIMER_CTRL);
+
+ ret = request_irq(irq, mps2_timer_interrupt, IRQF_TIMER, name, ce);
+ if (ret) {
+ pr_err("failed to request irq for clockevent: %d\n", ret);
+ goto out_kfree;
+ }
+
+ clockevents_config_and_register(&ce->clkevt, rate, 0xf, 0xffffffff);
+
+ return 0;
+
+out_kfree:
+ kfree(ce);
+out_iounmap:
+ iounmap(base);
+out_clk_disable:
+ /* clk_{disable, unprepare, put}() can handle NULL as a parameter */
+ clk_disable_unprepare(clk);
+out_clk_put:
+ clk_put(clk);
+out:
+ return ret;
+}
+
+static int __init mps2_clocksource_init(struct device_node *np)
+{
+ void __iomem *base;
+ struct clk *clk = NULL;
+ u32 rate;
+ int ret;
+ const char *name = "mps2-clksrc";
+
+ ret = of_property_read_u32(np, "clock-frequency", &rate);
+ if (ret) {
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ pr_err("failed to get clock for clocksource: %d\n", ret);
+ goto out;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("failed to enable clock for clocksource: %d\n", ret);
+ goto out_clk_put;
+ }
+
+ rate = clk_get_rate(clk);
+ }
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -EADDRNOTAVAIL;
+ pr_err("failed to map register for clocksource: %d\n", ret);
+ goto out_clk_disable;
+ }
+
+ /* Ensure timer is disabled */
+ writel_relaxed(0, base + TIMER_CTRL);
+
+ /* ... and set it up as free-running clocksource */
+ writel_relaxed(0xffffffff, base + TIMER_VALUE);
+ writel_relaxed(0xffffffff, base + TIMER_RELOAD);
+
+ writel_relaxed(TIMER_CTRL_ENABLE, base + TIMER_CTRL);
+
+ ret = clocksource_mmio_init(base + TIMER_VALUE, name,
+ rate, 200, 32,
+ clocksource_mmio_readl_down);
+ if (ret) {
+ pr_err("failed to init clocksource: %d\n", ret);
+ goto out_iounmap;
+ }
+
+ sched_clock_base = base;
+ sched_clock_register(mps2_sched_read, 32, rate);
+
+ return 0;
+
+out_iounmap:
+ iounmap(base);
+out_clk_disable:
+ /* clk_{disable, unprepare, put}() can handle NULL as a parameter */
+ clk_disable_unprepare(clk);
+out_clk_put:
+ clk_put(clk);
+out:
+ return ret;
+}
+
+static void __init mps2_timer_init(struct device_node *np)
+{
+ static int has_clocksource, has_clockevent;
+ int ret;
+
+ if (!has_clocksource) {
+ ret = mps2_clocksource_init(np);
+ if (!ret) {
+ has_clocksource = 1;
+ return;
+ }
+ }
+
+ if (!has_clockevent) {
+ ret = mps2_clockevent_init(np);
+ if (!ret) {
+ has_clockevent = 1;
+ return;
+ }
+ }
+}
+
+CLOCKSOURCE_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init);
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index d67bc356488f..7e583f8ea5f4 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -152,7 +152,7 @@ static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
}
static void
-mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
+__init mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
{
writel(TIMER_CTRL_CLEAR | TIMER_CTRL_DISABLE,
evt->gpt_base + TIMER_CTRL_REG(timer));
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 38333aba3055..7b94ad2ab278 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -258,17 +258,3 @@ static void __init tegra20_init_rtc(struct device_node *np)
register_persistent_clock(NULL, tegra_read_persistent_clock64);
}
CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
-
-#ifdef CONFIG_PM
-static u32 usec_config;
-
-void tegra_timer_suspend(void)
-{
- usec_config = timer_readl(TIMERUS_USEC_CFG);
-}
-
-void tegra_timer_resume(void)
-{
- timer_writel(usec_config, TIMERUS_USEC_CFG);
-}
-#endif
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a7f45853c103..b7445b6ae5a4 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -18,7 +18,11 @@ config CPU_FREQ
if CPU_FREQ
+config CPU_FREQ_GOV_ATTR_SET
+ bool
+
config CPU_FREQ_GOV_COMMON
+ select CPU_FREQ_GOV_ATTR_SET
select IRQ_WORK
bool
@@ -103,6 +107,17 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
Be aware that not all cpufreq drivers support the conservative
governor. If unsure have a look at the help section of the
driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
+ bool "schedutil"
+ depends on SMP
+ select CPU_FREQ_GOV_SCHEDUTIL
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the 'schedutil' CPUFreq governor by default. If unsure,
+ have a look at the help section of that governor. The fallback
+ governor will be 'performance'.
+
endchoice
config CPU_FREQ_GOV_PERFORMANCE
@@ -184,6 +199,26 @@ config CPU_FREQ_GOV_CONSERVATIVE
If in doubt, say N.
+config CPU_FREQ_GOV_SCHEDUTIL
+ tristate "'schedutil' cpufreq policy governor"
+ depends on CPU_FREQ && SMP
+ select CPU_FREQ_GOV_ATTR_SET
+ select IRQ_WORK
+ help
+ This governor makes decisions based on the utilization data provided
+ by the scheduler. It sets the CPU frequency to be proportional to
+ the utilization/capacity ratio coming from the scheduler. If the
+ utilization is frequency-invariant, the new frequency is also
+ proportional to the maximum available frequency. If that is not the
+ case, it is proportional to the current frequency of the CPU. The
+ frequency tipping point is at utilization/capacity equal to 80% in
+ both cases.
+
+ To compile this driver as a module, choose M here: the module will
+ be called cpufreq_schedutil.
+
+ If in doubt, say N.
+
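As a rough illustration of the help text above (a sketch, not the governor's actual code), the selected frequency is approximately 1.25 * reference_freq * util / max, so it crosses the reference frequency once util/max reaches 80%:

/* Sketch only: reference_freq is the maximum frequency when utilization is
 * frequency-invariant, otherwise the current frequency of the CPU. */
static unsigned int schedutil_next_freq_sketch(unsigned int reference_freq,
					       unsigned long util,
					       unsigned long max)
{
	/* 1.25 * reference_freq * util / max; equals reference_freq at util/max = 0.8 */
	return (reference_freq + (reference_freq >> 2)) * util / max;
}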
comment "CPU frequency scaling drivers"
config CPUFREQ_DT
@@ -191,6 +226,7 @@ config CPUFREQ_DT
depends on HAVE_CLK && OF
# if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y:
depends on !CPU_THERMAL || THERMAL
+ select CPUFREQ_DT_PLATDEV
select PM_OPP
help
This adds a generic DT based cpufreq driver for frequency management.
@@ -199,6 +235,15 @@ config CPUFREQ_DT
If in doubt, say N.
+config CPUFREQ_DT_PLATDEV
+ bool
+ help
+ This adds a generic DT based cpufreq platdev driver for frequency
+	  management. It creates a 'cpufreq-dt' platform device on the
+	  supported platforms.
+
+ If in doubt, say N.
+
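A hypothetical sketch of what such a platdev helper typically does (illustrative only, not the exact contents of the new cpufreq-dt-platdev.c, whose whitelist table appears later in this patch): match the machine's root node against the whitelist and, on a hit, register the 'cpufreq-dt' platform device so the generic driver can bind.

static int __init cpufreq_dt_platdev_init(void)
{
	struct device_node *np = of_find_node_by_path("/");
	const struct of_device_id *match;

	if (!np)
		return -ENODEV;

	/* Only create the device on whitelisted machines. */
	match = of_match_node(machines, np);
	of_node_put(np);
	if (!match)
		return -ENODEV;

	return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt",
							       -1, NULL, 0));
}
device_initcall(cpufreq_dt_platdev_init);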
if X86
source "drivers/cpufreq/Kconfig.x86"
endif
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 14b1f9393b05..d89b8afe23b6 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -50,15 +50,6 @@ config ARM_HIGHBANK_CPUFREQ
If in doubt, say N.
-config ARM_HISI_ACPU_CPUFREQ
- tristate "Hisilicon ACPU CPUfreq driver"
- depends on ARCH_HISI && CPUFREQ_DT
- select PM_OPP
- help
- This enables the hisilicon ACPU CPUfreq driver.
-
- If in doubt, say N.
-
config ARM_IMX6Q_CPUFREQ
tristate "Freescale i.MX6 cpufreq support"
depends on ARCH_MXC
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index c59bdcb83217..adbd1de1cea5 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -5,6 +5,7 @@
config X86_INTEL_PSTATE
bool "Intel P state control"
depends on X86
+ select ACPI_PROCESSOR if ACPI
help
This driver provides a P state for Intel core processors.
The driver implements an internal governor and will become
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9e63fb1b09f8..e1eb11ee3570 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -11,8 +11,10 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
+obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o
obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
+obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o
##################################################################################
# x86 drivers.
@@ -53,7 +55,6 @@ obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
-obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ) += hisi-acpu-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
@@ -78,6 +79,7 @@ obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
+obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
##################################################################################
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index fb5712141040..32a15052f363 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -25,6 +25,8 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -50,8 +52,6 @@ MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");
-#define PFX "acpi-cpufreq: "
-
enum {
UNDEFINED_CAPABLE = 0,
SYSTEM_INTEL_MSR_CAPABLE,
@@ -65,7 +65,6 @@ enum {
#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
struct acpi_cpufreq_data {
- struct cpufreq_frequency_table *freq_table;
unsigned int resume;
unsigned int cpu_feature;
unsigned int acpi_perf_cpu;
@@ -200,8 +199,9 @@ static int check_amd_hwpstate_cpu(unsigned int cpuid)
return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}
-static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
+static unsigned extract_io(struct cpufreq_policy *policy, u32 value)
{
+ struct acpi_cpufreq_data *data = policy->driver_data;
struct acpi_processor_performance *perf;
int i;
@@ -209,13 +209,14 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
for (i = 0; i < perf->state_count; i++) {
if (value == perf->states[i].status)
- return data->freq_table[i].frequency;
+ return policy->freq_table[i].frequency;
}
return 0;
}
-static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
+static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
{
+ struct acpi_cpufreq_data *data = policy->driver_data;
struct cpufreq_frequency_table *pos;
struct acpi_processor_performance *perf;
@@ -226,20 +227,22 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
perf = to_perf_data(data);
- cpufreq_for_each_entry(pos, data->freq_table)
+ cpufreq_for_each_entry(pos, policy->freq_table)
if (msr == perf->states[pos->driver_data].status)
return pos->frequency;
- return data->freq_table[0].frequency;
+ return policy->freq_table[0].frequency;
}
-static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
+static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
{
+ struct acpi_cpufreq_data *data = policy->driver_data;
+
switch (data->cpu_feature) {
case SYSTEM_INTEL_MSR_CAPABLE:
case SYSTEM_AMD_MSR_CAPABLE:
- return extract_msr(val, data);
+ return extract_msr(policy, val);
case SYSTEM_IO_CAPABLE:
- return extract_io(val, data);
+ return extract_io(policy, val);
default:
return 0;
}
@@ -374,11 +377,11 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
return 0;
data = policy->driver_data;
- if (unlikely(!data || !data->freq_table))
+ if (unlikely(!data || !policy->freq_table))
return 0;
- cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
- freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
+ cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
+ freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
if (freq != cached_freq) {
/*
* The dreaded BIOS frequency change behind our back.
@@ -392,14 +395,15 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
return freq;
}
-static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
- struct acpi_cpufreq_data *data)
+static unsigned int check_freqs(struct cpufreq_policy *policy,
+ const struct cpumask *mask, unsigned int freq)
{
+ struct acpi_cpufreq_data *data = policy->driver_data;
unsigned int cur_freq;
unsigned int i;
for (i = 0; i < 100; i++) {
- cur_freq = extract_freq(get_cur_val(mask, data), data);
+ cur_freq = extract_freq(policy, get_cur_val(mask, data));
if (cur_freq == freq)
return 1;
udelay(10);
@@ -416,12 +420,12 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
unsigned int next_perf_state = 0; /* Index into perf table */
int result = 0;
- if (unlikely(data == NULL || data->freq_table == NULL)) {
+ if (unlikely(!data)) {
return -ENODEV;
}
perf = to_perf_data(data);
- next_perf_state = data->freq_table[index].driver_data;
+ next_perf_state = policy->freq_table[index].driver_data;
if (perf->state == next_perf_state) {
if (unlikely(data->resume)) {
pr_debug("Called after resume, resetting to P%d\n",
@@ -444,8 +448,8 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
drv_write(data, mask, perf->states[next_perf_state].control);
if (acpi_pstate_strict) {
- if (!check_freqs(mask, data->freq_table[index].frequency,
- data)) {
+ if (!check_freqs(policy, mask,
+ policy->freq_table[index].frequency)) {
pr_debug("acpi_cpufreq_target failed (%d)\n",
policy->cpu);
result = -EAGAIN;
@@ -458,6 +462,43 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
return result;
}
+unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct acpi_cpufreq_data *data = policy->driver_data;
+ struct acpi_processor_performance *perf;
+ struct cpufreq_frequency_table *entry;
+ unsigned int next_perf_state, next_freq, freq;
+
+ /*
+ * Find the closest frequency above target_freq.
+ *
+ * The table is sorted in the reverse order with respect to the
+ * frequency and all of the entries are valid (see the initialization).
+ */
+ entry = policy->freq_table;
+ do {
+ entry++;
+ freq = entry->frequency;
+ } while (freq >= target_freq && freq != CPUFREQ_TABLE_END);
+ entry--;
+ next_freq = entry->frequency;
+ next_perf_state = entry->driver_data;
+
+ perf = to_perf_data(data);
+ if (perf->state == next_perf_state) {
+ if (unlikely(data->resume))
+ data->resume = 0;
+ else
+ return next_freq;
+ }
+
+ data->cpu_freq_write(&perf->control_register,
+ perf->states[next_perf_state].control);
+ perf->state = next_perf_state;
+ return next_freq;
+}
+
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
@@ -611,10 +652,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
if ((c->x86 == 15) &&
(c->x86_model == 6) &&
(c->x86_mask == 8)) {
- printk(KERN_INFO "acpi-cpufreq: Intel(R) "
- "Xeon(R) 7100 Errata AL30, processors may "
- "lock up on frequency changes: disabling "
- "acpi-cpufreq.\n");
+ pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
return -ENODEV;
}
}
@@ -631,6 +669,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
struct acpi_processor_performance *perf;
+ struct cpufreq_frequency_table *freq_table;
#ifdef CONFIG_SMP
static int blacklisted;
#endif
@@ -690,7 +729,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpumask_copy(data->freqdomain_cpus,
topology_sibling_cpumask(cpu));
policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
- pr_info_once(PFX "overriding BIOS provided _PSD data\n");
+ pr_info_once("overriding BIOS provided _PSD data\n");
}
#endif
@@ -742,9 +781,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_unreg;
}
- data->freq_table = kzalloc(sizeof(*data->freq_table) *
+ freq_table = kzalloc(sizeof(*freq_table) *
(perf->state_count+1), GFP_KERNEL);
- if (!data->freq_table) {
+ if (!freq_table) {
result = -ENOMEM;
goto err_unreg;
}
@@ -762,30 +801,29 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
policy->cpuinfo.transition_latency > 20 * 1000) {
policy->cpuinfo.transition_latency = 20 * 1000;
- printk_once(KERN_INFO
- "P-state transition latency capped at 20 uS\n");
+ pr_info_once("P-state transition latency capped at 20 uS\n");
}
/* table init */
for (i = 0; i < perf->state_count; i++) {
if (i > 0 && perf->states[i].core_frequency >=
- data->freq_table[valid_states-1].frequency / 1000)
+ freq_table[valid_states-1].frequency / 1000)
continue;
- data->freq_table[valid_states].driver_data = i;
- data->freq_table[valid_states].frequency =
+ freq_table[valid_states].driver_data = i;
+ freq_table[valid_states].frequency =
perf->states[i].core_frequency * 1000;
valid_states++;
}
- data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+ freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
perf->state = 0;
- result = cpufreq_table_validate_and_show(policy, data->freq_table);
+ result = cpufreq_table_validate_and_show(policy, freq_table);
if (result)
goto err_freqfree;
if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
- printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
+ pr_warn(FW_WARN "P-state 0 is not max freq\n");
switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
@@ -821,10 +859,13 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
*/
data->resume = 1;
+ policy->fast_switch_possible = !acpi_pstate_strict &&
+ !(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);
+
return result;
err_freqfree:
- kfree(data->freq_table);
+ kfree(freq_table);
err_unreg:
acpi_processor_unregister_performance(cpu);
err_free_mask:
@@ -842,13 +883,12 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
pr_debug("acpi_cpufreq_cpu_exit\n");
- if (data) {
- policy->driver_data = NULL;
- acpi_processor_unregister_performance(data->acpi_perf_cpu);
- free_cpumask_var(data->freqdomain_cpus);
- kfree(data->freq_table);
- kfree(data);
- }
+ policy->fast_switch_possible = false;
+ policy->driver_data = NULL;
+ acpi_processor_unregister_performance(data->acpi_perf_cpu);
+ free_cpumask_var(data->freqdomain_cpus);
+ kfree(policy->freq_table);
+ kfree(data);
return 0;
}
@@ -876,6 +916,7 @@ static struct freq_attr *acpi_cpufreq_attr[] = {
static struct cpufreq_driver acpi_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = acpi_cpufreq_target,
+ .fast_switch = acpi_cpufreq_fast_switch,
.bios_limit = acpi_processor_get_bios_limit,
.init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit,
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index c251247ae661..418042201e6d 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -298,7 +298,8 @@ static int merge_cluster_tables(void)
return 0;
}
-static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
@@ -308,11 +309,12 @@ static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
clk_put(clk[cluster]);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
if (arm_bL_ops->free_opp_table)
- arm_bL_ops->free_opp_table(cpu_dev);
+ arm_bL_ops->free_opp_table(cpumask);
dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
}
-static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
+static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
int i;
@@ -321,7 +323,7 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
return;
if (cluster < MAX_CLUSTERS)
- return _put_cluster_clk_and_freq_table(cpu_dev);
+ return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
for_each_present_cpu(i) {
struct device *cdev = get_cpu_device(i);
@@ -330,14 +332,15 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
return;
}
- _put_cluster_clk_and_freq_table(cdev);
+ _put_cluster_clk_and_freq_table(cdev, cpumask);
}
/* free virtual table */
kfree(freq_table[cluster]);
}
-static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
int ret;
@@ -345,7 +348,7 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
if (freq_table[cluster])
return 0;
- ret = arm_bL_ops->init_opp_table(cpu_dev);
+ ret = arm_bL_ops->init_opp_table(cpumask);
if (ret) {
dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
__func__, cpu_dev->id, ret);
@@ -374,14 +377,15 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
free_opp_table:
if (arm_bL_ops->free_opp_table)
- arm_bL_ops->free_opp_table(cpu_dev);
+ arm_bL_ops->free_opp_table(cpumask);
out:
dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
cluster);
return ret;
}
-static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
int i, ret;
@@ -390,7 +394,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
return 0;
if (cluster < MAX_CLUSTERS) {
- ret = _get_cluster_clk_and_freq_table(cpu_dev);
+ ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
if (ret)
atomic_dec(&cluster_usage[cluster]);
return ret;
@@ -407,7 +411,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
return -ENODEV;
}
- ret = _get_cluster_clk_and_freq_table(cdev);
+ ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
if (ret)
goto put_clusters;
}
@@ -433,7 +437,7 @@ put_clusters:
return -ENODEV;
}
- _put_cluster_clk_and_freq_table(cdev);
+ _put_cluster_clk_and_freq_table(cdev, cpumask);
}
atomic_dec(&cluster_usage[cluster]);
@@ -455,18 +459,6 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- ret = get_cluster_clk_and_freq_table(cpu_dev);
- if (ret)
- return ret;
-
- ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
- if (ret) {
- dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
- policy->cpu, cur_cluster);
- put_cluster_clk_and_freq_table(cpu_dev);
- return ret;
- }
-
if (cur_cluster < MAX_CLUSTERS) {
int cpu;
@@ -479,6 +471,18 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
}
+ ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
+ if (ret)
+ return ret;
+
+ ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
+ if (ret) {
+ dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
+ policy->cpu, cur_cluster);
+ put_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
+ return ret;
+ }
+
if (arm_bL_ops->get_transition_latency)
policy->cpuinfo.transition_latency =
arm_bL_ops->get_transition_latency(cpu_dev);
@@ -509,7 +513,7 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
return -ENODEV;
}
- put_cluster_clk_and_freq_table(cpu_dev);
+ put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
return 0;
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index b88889d9387e..184d7c3a112a 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -30,11 +30,11 @@ struct cpufreq_arm_bL_ops {
* This must set opp table for cpu_dev in a similar way as done by
* dev_pm_opp_of_add_table().
*/
- int (*init_opp_table)(struct device *cpu_dev);
+ int (*init_opp_table)(const struct cpumask *cpumask);
/* Optional */
int (*get_transition_latency)(struct device *cpu_dev);
- void (*free_opp_table)(struct device *cpu_dev);
+ void (*free_opp_table)(const struct cpumask *cpumask);
};
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 16ddeefe9443..39b3f51d9a30 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -43,23 +43,6 @@ static struct device_node *get_cpu_node_with_valid_op(int cpu)
return np;
}
-static int dt_init_opp_table(struct device *cpu_dev)
-{
- struct device_node *np;
- int ret;
-
- np = of_node_get(cpu_dev->of_node);
- if (!np) {
- pr_err("failed to find cpu%d node\n", cpu_dev->id);
- return -ENOENT;
- }
-
- ret = dev_pm_opp_of_add_table(cpu_dev);
- of_node_put(np);
-
- return ret;
-}
-
static int dt_get_transition_latency(struct device *cpu_dev)
{
struct device_node *np;
@@ -81,8 +64,8 @@ static int dt_get_transition_latency(struct device *cpu_dev)
static struct cpufreq_arm_bL_ops dt_bL_ops = {
.name = "dt-bl",
.get_transition_latency = dt_get_transition_latency,
- .init_opp_table = dt_init_opp_table,
- .free_opp_table = dev_pm_opp_of_remove_table,
+ .init_opp_table = dev_pm_opp_of_cpumask_add_table,
+ .free_opp_table = dev_pm_opp_of_cpumask_remove_table,
};
static int generic_bL_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 7c0bdfb1a2ca..8882b8e2ecd0 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -173,4 +173,25 @@ out:
return -ENODEV;
}
+static void __exit cppc_cpufreq_exit(void)
+{
+ struct cpudata *cpu;
+ int i;
+
+ cpufreq_unregister_driver(&cppc_cpufreq_driver);
+
+ for_each_possible_cpu(i) {
+ cpu = all_cpu_data[i];
+ free_cpumask_var(cpu->shared_cpu_map);
+ kfree(cpu);
+ }
+
+ kfree(all_cpu_data);
+}
+
+module_exit(cppc_cpufreq_exit);
+MODULE_AUTHOR("Ashwin Chaugule");
+MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
+MODULE_LICENSE("GPL");
+
late_initcall(cppc_cpufreq_init);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
new file mode 100644
index 000000000000..3646b143bbf5
--- /dev/null
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2016 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static const struct of_device_id machines[] __initconst = {
+ { .compatible = "allwinner,sun4i-a10", },
+ { .compatible = "allwinner,sun5i-a10s", },
+ { .compatible = "allwinner,sun5i-a13", },
+ { .compatible = "allwinner,sun5i-r8", },
+ { .compatible = "allwinner,sun6i-a31", },
+ { .compatible = "allwinner,sun6i-a31s", },
+ { .compatible = "allwinner,sun7i-a20", },
+ { .compatible = "allwinner,sun8i-a23", },
+ { .compatible = "allwinner,sun8i-a33", },
+ { .compatible = "allwinner,sun8i-a83t", },
+ { .compatible = "allwinner,sun8i-h3", },
+
+ { .compatible = "hisilicon,hi6220", },
+
+ { .compatible = "fsl,imx27", },
+ { .compatible = "fsl,imx51", },
+ { .compatible = "fsl,imx53", },
+ { .compatible = "fsl,imx7d", },
+
+ { .compatible = "marvell,berlin", },
+
+ { .compatible = "samsung,exynos3250", },
+ { .compatible = "samsung,exynos4210", },
+ { .compatible = "samsung,exynos4212", },
+ { .compatible = "samsung,exynos4412", },
+ { .compatible = "samsung,exynos5250", },
+#ifndef CONFIG_BL_SWITCHER
+ { .compatible = "samsung,exynos5420", },
+ { .compatible = "samsung,exynos5800", },
+#endif
+
+ { .compatible = "renesas,emev2", },
+ { .compatible = "renesas,r7s72100", },
+ { .compatible = "renesas,r8a73a4", },
+ { .compatible = "renesas,r8a7740", },
+ { .compatible = "renesas,r8a7778", },
+ { .compatible = "renesas,r8a7779", },
+ { .compatible = "renesas,r8a7790", },
+ { .compatible = "renesas,r8a7791", },
+ { .compatible = "renesas,r8a7793", },
+ { .compatible = "renesas,r8a7794", },
+ { .compatible = "renesas,sh73a0", },
+
+ { .compatible = "rockchip,rk2928", },
+ { .compatible = "rockchip,rk3036", },
+ { .compatible = "rockchip,rk3066a", },
+ { .compatible = "rockchip,rk3066b", },
+ { .compatible = "rockchip,rk3188", },
+ { .compatible = "rockchip,rk3228", },
+ { .compatible = "rockchip,rk3288", },
+ { .compatible = "rockchip,rk3366", },
+ { .compatible = "rockchip,rk3368", },
+ { .compatible = "rockchip,rk3399", },
+
+ { .compatible = "sigma,tango4" },
+
+ { .compatible = "ti,omap2", },
+ { .compatible = "ti,omap3", },
+ { .compatible = "ti,omap4", },
+ { .compatible = "ti,omap5", },
+
+ { .compatible = "xlnx,zynq-7000", },
+};
+
+static int __init cpufreq_dt_platdev_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+
+ if (!np)
+ return -ENODEV;
+
+ if (!of_match_node(machines, np))
+ return -ENODEV;
+
+ of_node_put(of_root);
+
+ return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1,
+ NULL, 0));
+}
+device_initcall(cpufreq_dt_platdev_init);
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 5f8dbe640a20..3957de801ae8 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -15,7 +15,6 @@
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
-#include <linux/cpufreq-dt.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -147,7 +146,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
struct clk *cpu_clk;
struct dev_pm_opp *suspend_opp;
unsigned int transition_latency;
- bool opp_v1 = false;
+ bool fallback = false;
const char *name;
int ret;
@@ -167,14 +166,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
/* Get OPP-sharing information from "operating-points-v2" bindings */
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
if (ret) {
+ if (ret != -ENOENT)
+ goto out_put_clk;
+
/*
* operating-points-v2 not supported, fallback to old method of
- * finding shared-OPPs for backward compatibility.
+ * finding shared-OPPs for backward compatibility if the
+ * platform hasn't set sharing CPUs.
*/
- if (ret == -ENOENT)
- opp_v1 = true;
- else
- goto out_put_clk;
+ if (dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus))
+ fallback = true;
}
/*
@@ -214,11 +215,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
- if (opp_v1) {
- struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
-
- if (!pd || !pd->independent_clocks)
- cpumask_setall(policy->cpus);
+ if (fallback) {
+ cpumask_setall(policy->cpus);
/*
* OPP tables are initialized only for policy->cpu, do it for
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index db69eeb501a7..5503d491b016 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -7,6 +7,8 @@
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -56,8 +58,6 @@ MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)");
MODULE_PARM_DESC(min_fsb,
"Minimum FSB to use, if not defined: current FSB - 50");
-#define PFX "cpufreq-nforce2: "
-
/**
* nforce2_calc_fsb - calculate FSB
* @pll: PLL value
@@ -174,13 +174,13 @@ static int nforce2_set_fsb(unsigned int fsb)
int pll = 0;
if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) {
- printk(KERN_ERR PFX "FSB %d is out of range!\n", fsb);
+ pr_err("FSB %d is out of range!\n", fsb);
return -EINVAL;
}
tfsb = nforce2_fsb_read(0);
if (!tfsb) {
- printk(KERN_ERR PFX "Error while reading the FSB\n");
+ pr_err("Error while reading the FSB\n");
return -EINVAL;
}
@@ -276,8 +276,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
/* local_irq_save(flags); */
if (nforce2_set_fsb(target_fsb) < 0)
- printk(KERN_ERR PFX "Changing FSB to %d failed\n",
- target_fsb);
+ pr_err("Changing FSB to %d failed\n", target_fsb);
else
pr_debug("Changed FSB successfully to %d\n",
target_fsb);
@@ -325,8 +324,7 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
/* FIX: Get FID from CPU */
if (!fid) {
if (!cpu_khz) {
- printk(KERN_WARNING PFX
- "cpu_khz not set, can't calculate multiplier!\n");
+ pr_warn("cpu_khz not set, can't calculate multiplier!\n");
return -ENODEV;
}
@@ -341,8 +339,8 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
}
}
- printk(KERN_INFO PFX "FSB currently at %i MHz, FID %d.%d\n", fsb,
- fid / 10, fid % 10);
+ pr_info("FSB currently at %i MHz, FID %d.%d\n",
+ fsb, fid / 10, fid % 10);
/* Set maximum FSB to FSB at boot time */
max_fsb = nforce2_fsb_read(1);
@@ -401,11 +399,9 @@ static int nforce2_detect_chipset(void)
if (nforce2_dev == NULL)
return -ENODEV;
- printk(KERN_INFO PFX "Detected nForce2 chipset revision %X\n",
- nforce2_dev->revision);
- printk(KERN_INFO PFX
- "FSB changing is maybe unstable and can lead to "
- "crashes and data loss.\n");
+ pr_info("Detected nForce2 chipset revision %X\n",
+ nforce2_dev->revision);
+	pr_info("FSB changing may be unstable and can lead to crashes and data loss\n");
return 0;
}
@@ -423,7 +419,7 @@ static int __init nforce2_init(void)
/* detect chipset */
if (nforce2_detect_chipset()) {
- printk(KERN_INFO PFX "No nForce2 chipset.\n");
+ pr_info("No nForce2 chipset\n");
return -ENODEV;
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index c4acfc5273b3..035513b012ee 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -78,6 +78,11 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
+static inline int cpufreq_exit_governor(struct cpufreq_policy *policy)
+{
+ return cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+}
+
/**
* Two notifier lists: the "policy" list is involved in the
* validation process for a new CPU frequency policy; the
@@ -429,6 +434,73 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
+/*
+ * Fast frequency switching status count. Positive means "enabled", negative
+ * means "disabled" and 0 means "not decided yet".
+ */
+static int cpufreq_fast_switch_count;
+static DEFINE_MUTEX(cpufreq_fast_switch_lock);
+
+static void cpufreq_list_transition_notifiers(void)
+{
+ struct notifier_block *nb;
+
+ pr_info("Registered transition notifiers:\n");
+
+ mutex_lock(&cpufreq_transition_notifier_list.mutex);
+
+ for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
+ pr_info("%pF\n", nb->notifier_call);
+
+ mutex_unlock(&cpufreq_transition_notifier_list.mutex);
+}
+
+/**
+ * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
+ * @policy: cpufreq policy to enable fast frequency switching for.
+ *
+ * Try to enable fast frequency switching for @policy.
+ *
+ * The attempt will fail if there is at least one transition notifier registered
+ * at this point, as fast frequency switching is quite fundamentally at odds
+ * with transition notifiers. Thus if successful, it will make registration of
+ * transition notifiers fail going forward.
+ */
+void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
+{
+ lockdep_assert_held(&policy->rwsem);
+
+ if (!policy->fast_switch_possible)
+ return;
+
+ mutex_lock(&cpufreq_fast_switch_lock);
+ if (cpufreq_fast_switch_count >= 0) {
+ cpufreq_fast_switch_count++;
+ policy->fast_switch_enabled = true;
+ } else {
+ pr_warn("CPU%u: Fast frequency switching not enabled\n",
+ policy->cpu);
+ cpufreq_list_transition_notifiers();
+ }
+ mutex_unlock(&cpufreq_fast_switch_lock);
+}
+EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
+
+/**
+ * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
+ * @policy: cpufreq policy to disable fast frequency switching for.
+ */
+void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
+{
+ mutex_lock(&cpufreq_fast_switch_lock);
+ if (policy->fast_switch_enabled) {
+ policy->fast_switch_enabled = false;
+ if (!WARN_ON(cpufreq_fast_switch_count <= 0))
+ cpufreq_fast_switch_count--;
+ }
+ mutex_unlock(&cpufreq_fast_switch_lock);
+}
+EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
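A rough sketch of the intended calling convention, not taken from this patch: the driver advertises the capability by setting policy->fast_switch_possible from its ->init() callback, and the governor then opts in while holding policy->rwsem. The my_driver_/my_governor_ names are hypothetical.

/* Hypothetical driver ->init(): advertise that ->fast_switch() is available. */
static int my_driver_cpu_init(struct cpufreq_policy *policy)
{
	/* ... clock and frequency table setup ... */
	policy->fast_switch_possible = true;
	return 0;
}

/* Hypothetical governor start path; policy->rwsem is already held here. */
static int my_governor_start(struct cpufreq_policy *policy)
{
	cpufreq_enable_fast_switch(policy);

	if (!policy->fast_switch_enabled)
		pr_debug("CPU%u: falling back to __cpufreq_driver_target()\n",
			 policy->cpu);
	return 0;
}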
/*********************************************************************
* SYSFS INTERFACE *
@@ -1248,26 +1320,24 @@ out_free_policy:
*/
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
+ struct cpufreq_policy *policy;
unsigned cpu = dev->id;
- int ret;
dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
- if (cpu_online(cpu)) {
- ret = cpufreq_online(cpu);
- } else {
- /*
- * A hotplug notifier will follow and we will handle it as CPU
- * online then. For now, just create the sysfs link, unless
- * there is no policy or the link is already present.
- */
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (cpu_online(cpu))
+ return cpufreq_online(cpu);
- ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
- ? add_cpu_dev_symlink(policy, cpu) : 0;
- }
+ /*
+ * A hotplug notifier will follow and we will handle it as CPU online
+ * then. For now, just create the sysfs link, unless there is no policy
+ * or the link is already present.
+ */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
+ return 0;
- return ret;
+ return add_cpu_dev_symlink(policy, cpu);
}
static void cpufreq_offline(unsigned int cpu)
@@ -1319,7 +1389,7 @@ static void cpufreq_offline(unsigned int cpu)
/* If cpu is last user of policy, free policy */
if (has_target()) {
- ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ ret = cpufreq_exit_governor(policy);
if (ret)
pr_err("%s: Failed to exit governor\n", __func__);
}
@@ -1447,8 +1517,12 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
ret_freq = cpufreq_driver->get(policy->cpu);
- /* Updating inactive policies is invalid, so avoid doing that. */
- if (unlikely(policy_is_inactive(policy)))
+ /*
+ * Updating inactive policies is invalid, so avoid doing that. Also
+ * if fast frequency switching is used with the given policy, the check
+ * against policy->cur is pointless, so skip it in that case too.
+ */
+ if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
return ret_freq;
if (ret_freq && policy->cur &&
@@ -1679,8 +1753,18 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
+ mutex_lock(&cpufreq_fast_switch_lock);
+
+ if (cpufreq_fast_switch_count > 0) {
+ mutex_unlock(&cpufreq_fast_switch_lock);
+ return -EBUSY;
+ }
ret = srcu_notifier_chain_register(
&cpufreq_transition_notifier_list, nb);
+ if (!ret)
+ cpufreq_fast_switch_count--;
+
+ mutex_unlock(&cpufreq_fast_switch_lock);
break;
case CPUFREQ_POLICY_NOTIFIER:
ret = blocking_notifier_chain_register(
@@ -1713,8 +1797,14 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
+ mutex_lock(&cpufreq_fast_switch_lock);
+
ret = srcu_notifier_chain_unregister(
&cpufreq_transition_notifier_list, nb);
+ if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
+ cpufreq_fast_switch_count++;
+
+ mutex_unlock(&cpufreq_fast_switch_lock);
break;
case CPUFREQ_POLICY_NOTIFIER:
ret = blocking_notifier_chain_unregister(
@@ -1733,6 +1823,37 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
* GOVERNORS *
*********************************************************************/
+/**
+ * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
+ * @policy: cpufreq policy to switch the frequency for.
+ * @target_freq: New frequency to set (may be approximate).
+ *
+ * Carry out a fast frequency switch without sleeping.
+ *
+ * The driver's ->fast_switch() callback invoked by this function must be
+ * suitable for being called from within RCU-sched read-side critical sections
+ * and it is expected to select the minimum available frequency greater than or
+ * equal to @target_freq (CPUFREQ_RELATION_L).
+ *
+ * This function must not be called if policy->fast_switch_enabled is unset.
+ *
+ * Governors calling this function must guarantee that it will never be invoked
+ * twice in parallel for the same policy and that it will never be called in
+ * parallel with either ->target() or ->target_index() for the same policy.
+ *
+ * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
+ * callback to indicate an error condition, the hardware configuration must be
+ * preserved.
+ */
+unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+	target_freq = clamp_val(target_freq, policy->min, policy->max);
+
+ return cpufreq_driver->fast_switch(policy, target_freq);
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
+
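The requirements documented above translate into a small non-sleeping driver callback. Below is a minimal sketch, assuming an ascending frequency table and an atomically writable performance request register; my_write_perf_request() is a made-up helper, the rest uses existing cpufreq definitions.

/*
 * Hypothetical ->fast_switch() implementation: pick the lowest listed
 * frequency >= target_freq (CPUFREQ_RELATION_L) without sleeping, and
 * return the frequency actually programmed, or CPUFREQ_ENTRY_INVALID
 * if nothing was changed. my_write_perf_request() is a made-up
 * register write for this sketch.
 */
static unsigned int my_driver_fast_switch(struct cpufreq_policy *policy,
					  unsigned int target_freq)
{
	struct cpufreq_frequency_table *pos;

	cpufreq_for_each_valid_entry(pos, policy->freq_table) {
		if (pos->frequency >= target_freq) {
			my_write_perf_request(policy->cpu, pos->driver_data);
			return pos->frequency;
		}
	}

	return CPUFREQ_ENTRY_INVALID;
}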
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, int index)
@@ -2108,7 +2229,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
return ret;
}
- ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ ret = cpufreq_exit_governor(policy);
if (ret) {
pr_err("%s: Failed to Exit Governor: %s (%d)\n",
__func__, old_gov->name, ret);
@@ -2125,7 +2246,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
pr_debug("cpufreq: governor change\n");
return 0;
}
- cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ cpufreq_exit_governor(policy);
}
/* new governor failed, so re-start old one */
@@ -2193,16 +2314,13 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
cpufreq_online(cpu);
break;
case CPU_DOWN_PREPARE:
cpufreq_offline(cpu);
break;
-
- case CPU_DOWN_FAILED:
- cpufreq_online(cpu);
- break;
}
return NOTIFY_OK;
}
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index bf4913f6453b..316df247e00d 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -129,9 +129,10 @@ static struct notifier_block cs_cpufreq_notifier_block = {
/************************** sysfs interface ************************/
static struct dbs_governor cs_dbs_gov;
-static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
- const char *buf, size_t count)
+static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -143,9 +144,10 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
return count;
}
-static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
@@ -158,9 +160,10 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
return count;
}
-static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
@@ -175,9 +178,10 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
return count;
}
-static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
- const char *buf, size_t count)
+static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
@@ -199,9 +203,10 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
return count;
}
-static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf,
+ size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 5f1147fa9239..be498d56dd69 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -43,9 +43,10 @@ static DEFINE_MUTEX(gov_dbs_data_mutex);
* This must be called with dbs_data->mutex held, otherwise traversing
* policy_dbs_list isn't safe.
*/
-ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct policy_dbs_info *policy_dbs;
unsigned int rate;
int ret;
@@ -59,7 +60,7 @@ ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
* We are operating under dbs_data->mutex and so the list and its
* entries can't be freed concurrently.
*/
- list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+ list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
mutex_lock(&policy_dbs->timer_mutex);
/*
* On 32-bit architectures this may race with the
@@ -96,13 +97,13 @@ void gov_update_cpu_data(struct dbs_data *dbs_data)
{
struct policy_dbs_info *policy_dbs;
- list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+ list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
unsigned int j;
for_each_cpu(j, policy_dbs->policy->cpus) {
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
- j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall,
+ j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
dbs_data->io_is_busy);
if (dbs_data->ignore_nice_load)
j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
@@ -111,54 +112,6 @@ void gov_update_cpu_data(struct dbs_data *dbs_data)
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);
-static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
-{
- return container_of(kobj, struct dbs_data, kobj);
-}
-
-static inline struct governor_attr *to_gov_attr(struct attribute *attr)
-{
- return container_of(attr, struct governor_attr, attr);
-}
-
-static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct dbs_data *dbs_data = to_dbs_data(kobj);
- struct governor_attr *gattr = to_gov_attr(attr);
-
- return gattr->show(dbs_data, buf);
-}
-
-static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct dbs_data *dbs_data = to_dbs_data(kobj);
- struct governor_attr *gattr = to_gov_attr(attr);
- int ret = -EBUSY;
-
- mutex_lock(&dbs_data->mutex);
-
- if (dbs_data->usage_count)
- ret = gattr->store(dbs_data, buf, count);
-
- mutex_unlock(&dbs_data->mutex);
-
- return ret;
-}
-
-/*
- * Sysfs Ops for accessing governor attributes.
- *
- * All show/store invocations for governor specific sysfs attributes, will first
- * call the below show/store callbacks and the attribute specific callback will
- * be called from within it.
- */
-static const struct sysfs_ops governor_sysfs_ops = {
- .show = governor_show,
- .store = governor_store,
-};
-
unsigned int dbs_update(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
@@ -184,14 +137,14 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
/* Get Absolute Load */
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
- u64 cur_wall_time, cur_idle_time;
- unsigned int idle_time, wall_time;
+ u64 update_time, cur_idle_time;
+ unsigned int idle_time, time_elapsed;
unsigned int load;
- cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);
+ cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);
- wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
- j_cdbs->prev_cpu_wall = cur_wall_time;
+ time_elapsed = update_time - j_cdbs->prev_update_time;
+ j_cdbs->prev_update_time = update_time;
idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
j_cdbs->prev_cpu_idle = cur_idle_time;
@@ -203,47 +156,62 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
j_cdbs->prev_cpu_nice = cur_nice;
}
- if (unlikely(!wall_time || wall_time < idle_time))
- continue;
-
- /*
- * If the CPU had gone completely idle, and a task just woke up
- * on this CPU now, it would be unfair to calculate 'load' the
- * usual way for this elapsed time-window, because it will show
- * near-zero load, irrespective of how CPU intensive that task
- * actually is. This is undesirable for latency-sensitive bursty
- * workloads.
- *
- * To avoid this, we reuse the 'load' from the previous
- * time-window and give this task a chance to start with a
- * reasonably high CPU frequency. (However, we shouldn't over-do
- * this copy, lest we get stuck at a high load (high frequency)
- * for too long, even when the current system load has actually
- * dropped down. So we perform the copy only once, upon the
- * first wake-up from idle.)
- *
- * Detecting this situation is easy: the governor's utilization
- * update handler would not have run during CPU-idle periods.
- * Hence, an unusually large 'wall_time' (as compared to the
- * sampling rate) indicates this scenario.
- *
- * prev_load can be zero in two cases and we must recalculate it
- * for both cases:
- * - during long idle intervals
- * - explicitly set to zero
- */
- if (unlikely(wall_time > (2 * sampling_rate) &&
- j_cdbs->prev_load)) {
+ if (unlikely(!time_elapsed)) {
+ /*
+ * That can only happen when this function is called
+ * twice in a row with a very short interval between the
+ * calls, so the previous load value can be used then.
+ */
load = j_cdbs->prev_load;
-
+ } else if (unlikely(time_elapsed > 2 * sampling_rate &&
+ j_cdbs->prev_load)) {
/*
- * Perform a destructive copy, to ensure that we copy
- * the previous load only once, upon the first wake-up
- * from idle.
+ * If the CPU had gone completely idle and a task has
+ * just woken up on this CPU now, it would be unfair to
+ * calculate 'load' the usual way for this elapsed
+ * time-window, because it would show near-zero load,
+ * irrespective of how CPU intensive that task actually
+ * was. This is undesirable for latency-sensitive bursty
+ * workloads.
+ *
+ * To avoid this, reuse the 'load' from the previous
+ * time-window and give this task a chance to start with
+ * a reasonably high CPU frequency. However, that
+ * shouldn't be over-done, lest we get stuck at a high
+ * load (high frequency) for too long, even when the
+ * current system load has actually dropped down, so
+ * clear prev_load to guarantee that the load will be
+ * computed again next time.
+ *
+ * Detecting this situation is easy: the governor's
+ * utilization update handler would not have run during
+ * CPU-idle periods. Hence, an unusually large
+ * 'time_elapsed' (as compared to the sampling rate)
+ * indicates this scenario.
*/
+ load = j_cdbs->prev_load;
j_cdbs->prev_load = 0;
} else {
- load = 100 * (wall_time - idle_time) / wall_time;
+ if (time_elapsed >= idle_time) {
+ load = 100 * (time_elapsed - idle_time) / time_elapsed;
+ } else {
+ /*
+ * That can happen if idle_time is returned by
+ * get_cpu_idle_time_jiffy(). In that case
+ * idle_time is roughly equal to the difference
+ * between time_elapsed and "busy time" obtained
+ * from CPU statistics. Then, the "busy time"
+ * can end up being greater than time_elapsed
+ * (for example, if jiffies_64 and the CPU
+ * statistics are updated by different CPUs),
+ * so idle_time may in fact be negative. That
+ * means, though, that the CPU was busy all
+ * the time (on the rough average) during the
+ * last sampling interval and 100 can be
+ * returned as the load.
+ */
+ load = (int)idle_time < 0 ? 100 : 0;
+ }
j_cdbs->prev_load = load;
}
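Restated outside the kernel context, the branches above amount to the following per-sample load estimate (illustrative only; the real code operates on the per-CPU cpu_dbs_info fields):

/* Illustrative restatement of the load estimate computed in dbs_update(). */
static unsigned int estimated_load(unsigned int time_elapsed,
				   unsigned int idle_time,
				   unsigned int sampling_rate,
				   unsigned int *prev_load)
{
	unsigned int load;

	if (!time_elapsed) {
		/* Two back-to-back samples: reuse the previous value. */
		load = *prev_load;
	} else if (time_elapsed > 2 * sampling_rate && *prev_load) {
		/* First sample after a long idle period: reuse once, then clear. */
		load = *prev_load;
		*prev_load = 0;
	} else if (time_elapsed >= idle_time) {
		load = 100 * (time_elapsed - idle_time) / time_elapsed;
		*prev_load = load;
	} else {
		/* idle_time delta underflowed: the CPU was effectively busy. */
		load = (int)idle_time < 0 ? 100 : 0;
		*prev_load = load;
	}

	return load;
}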
@@ -254,43 +222,6 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(dbs_update);
-static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
- unsigned int delay_us)
-{
- struct cpufreq_policy *policy = policy_dbs->policy;
- int cpu;
-
- gov_update_sample_delay(policy_dbs, delay_us);
- policy_dbs->last_sample_time = 0;
-
- for_each_cpu(cpu, policy->cpus) {
- struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
-
- cpufreq_set_update_util_data(cpu, &cdbs->update_util);
- }
-}
-
-static inline void gov_clear_update_util(struct cpufreq_policy *policy)
-{
- int i;
-
- for_each_cpu(i, policy->cpus)
- cpufreq_set_update_util_data(i, NULL);
-
- synchronize_sched();
-}
-
-static void gov_cancel_work(struct cpufreq_policy *policy)
-{
- struct policy_dbs_info *policy_dbs = policy->governor_data;
-
- gov_clear_update_util(policy_dbs->policy);
- irq_work_sync(&policy_dbs->irq_work);
- cancel_work_sync(&policy_dbs->work);
- atomic_set(&policy_dbs->work_count, 0);
- policy_dbs->work_in_progress = false;
-}
-
static void dbs_work_handler(struct work_struct *work)
{
struct policy_dbs_info *policy_dbs;
@@ -378,6 +309,44 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
irq_work_queue(&policy_dbs->irq_work);
}
+static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
+ unsigned int delay_us)
+{
+ struct cpufreq_policy *policy = policy_dbs->policy;
+ int cpu;
+
+ gov_update_sample_delay(policy_dbs, delay_us);
+ policy_dbs->last_sample_time = 0;
+
+ for_each_cpu(cpu, policy->cpus) {
+ struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
+
+ cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
+ dbs_update_util_handler);
+ }
+}
+
+static inline void gov_clear_update_util(struct cpufreq_policy *policy)
+{
+ int i;
+
+ for_each_cpu(i, policy->cpus)
+ cpufreq_remove_update_util_hook(i);
+
+ synchronize_sched();
+}
+
+static void gov_cancel_work(struct cpufreq_policy *policy)
+{
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+
+ gov_clear_update_util(policy_dbs->policy);
+ irq_work_sync(&policy_dbs->irq_work);
+ cancel_work_sync(&policy_dbs->work);
+ atomic_set(&policy_dbs->work_count, 0);
+ policy_dbs->work_in_progress = false;
+}
+
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
struct dbs_governor *gov)
{
@@ -400,7 +369,6 @@ static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *poli
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
j_cdbs->policy_dbs = policy_dbs;
- j_cdbs->update_util.func = dbs_update_util_handler;
}
return policy_dbs;
}
@@ -449,10 +417,7 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
policy_dbs->dbs_data = dbs_data;
policy->governor_data = policy_dbs;
- mutex_lock(&dbs_data->mutex);
- dbs_data->usage_count++;
- list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
- mutex_unlock(&dbs_data->mutex);
+ gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
goto out;
}
@@ -462,8 +427,7 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
goto free_policy_dbs_info;
}
- INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
- mutex_init(&dbs_data->mutex);
+ gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
ret = gov->init(dbs_data, !policy->governor->initialized);
if (ret)
@@ -483,14 +447,11 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
if (!have_governor_per_policy())
gov->gdbs_data = dbs_data;
- policy->governor_data = policy_dbs;
-
policy_dbs->dbs_data = dbs_data;
- dbs_data->usage_count = 1;
- list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
+ policy->governor_data = policy_dbs;
gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
- ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
+ ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
get_governor_parent_kobj(policy),
"%s", gov->gov.name);
if (!ret)
@@ -519,29 +480,21 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy)
struct dbs_governor *gov = dbs_governor_of(policy);
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
- int count;
+ unsigned int count;
/* Protect gov->gdbs_data against concurrent updates. */
mutex_lock(&gov_dbs_data_mutex);
- mutex_lock(&dbs_data->mutex);
- list_del(&policy_dbs->list);
- count = --dbs_data->usage_count;
- mutex_unlock(&dbs_data->mutex);
+ count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);
- if (!count) {
- kobject_put(&dbs_data->kobj);
-
- policy->governor_data = NULL;
+ policy->governor_data = NULL;
+ if (!count) {
if (!have_governor_per_policy())
gov->gdbs_data = NULL;
gov->exit(dbs_data, policy->governor->initialized == 1);
- mutex_destroy(&dbs_data->mutex);
kfree(dbs_data);
- } else {
- policy->governor_data = NULL;
}
free_policy_dbs_info(policy_dbs, gov);
@@ -570,12 +523,12 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy)
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
- unsigned int prev_load;
- j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
-
- prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle;
- j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall;
+ j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
+ /*
+ * Make the first invocation of dbs_update() compute the load.
+ */
+ j_cdbs->prev_load = 0;
if (ignore_nice)
j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 61ff82fe0613..34eb214b6d57 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -24,20 +24,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
-/*
- * The polling frequency depends on the capability of the processor. Default
- * polling frequency is 1000 times the transition latency of the processor. The
- * governor will work on any processor with transition latency <= 10ms, using
- * appropriate sampling rate.
- *
- * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work. All times here are in us (micro seconds).
- */
-#define MIN_SAMPLING_RATE_RATIO (2)
-#define LATENCY_MULTIPLIER (1000)
-#define MIN_LATENCY_MULTIPLIER (20)
-#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
-
/* Ondemand Sampling types */
enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
@@ -52,7 +38,7 @@ enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
/* Governor demand based switching data (per-policy or global). */
struct dbs_data {
- int usage_count;
+ struct gov_attr_set attr_set;
void *tuners;
unsigned int min_sampling_rate;
unsigned int ignore_nice_load;
@@ -60,37 +46,27 @@ struct dbs_data {
unsigned int sampling_down_factor;
unsigned int up_threshold;
unsigned int io_is_busy;
-
- struct kobject kobj;
- struct list_head policy_dbs_list;
- /*
- * Protect concurrent updates to governor tunables from sysfs,
- * policy_dbs_list and usage_count.
- */
- struct mutex mutex;
};
-/* Governor's specific attributes */
-struct dbs_data;
-struct governor_attr {
- struct attribute attr;
- ssize_t (*show)(struct dbs_data *dbs_data, char *buf);
- ssize_t (*store)(struct dbs_data *dbs_data, const char *buf,
- size_t count);
-};
+static inline struct dbs_data *to_dbs_data(struct gov_attr_set *attr_set)
+{
+ return container_of(attr_set, struct dbs_data, attr_set);
+}
#define gov_show_one(_gov, file_name) \
static ssize_t show_##file_name \
-(struct dbs_data *dbs_data, char *buf) \
+(struct gov_attr_set *attr_set, char *buf) \
{ \
+ struct dbs_data *dbs_data = to_dbs_data(attr_set); \
struct _gov##_dbs_tuners *tuners = dbs_data->tuners; \
return sprintf(buf, "%u\n", tuners->file_name); \
}
#define gov_show_one_common(file_name) \
static ssize_t show_##file_name \
-(struct dbs_data *dbs_data, char *buf) \
+(struct gov_attr_set *attr_set, char *buf) \
{ \
+ struct dbs_data *dbs_data = to_dbs_data(attr_set); \
return sprintf(buf, "%u\n", dbs_data->file_name); \
}
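With the tunables container now exposing a gov_attr_set, a governor-specific attribute follows the pattern sketched below; my_tunable is a hypothetical field added to struct dbs_data for this example, and gov_attr_rw() is the helper the ondemand and conservative governors already use to wrap the show/store pair into a struct governor_attr.

/* my_tunable is a hypothetical dbs_data field, shown only for the sketch. */
gov_show_one_common(my_tunable);

static ssize_t store_my_tunable(struct gov_attr_set *attr_set,
				const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int val;

	if (kstrtouint(buf, 10, &val))
		return -EINVAL;

	dbs_data->my_tunable = val;
	return count;
}

gov_attr_rw(my_tunable);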
@@ -135,7 +111,7 @@ static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
/* Per cpu structures */
struct cpu_dbs_info {
u64 prev_cpu_idle;
- u64 prev_cpu_wall;
+ u64 prev_update_time;
u64 prev_cpu_nice;
/*
* Used to keep track of load in the previous interval. However, when
@@ -184,7 +160,7 @@ void od_register_powersave_bias_handler(unsigned int (*f)
(struct cpufreq_policy *, unsigned int, unsigned int),
unsigned int powersave_bias);
void od_unregister_powersave_bias_handler(void);
-ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
size_t count);
void gov_update_cpu_data(struct dbs_data *dbs_data);
#endif /* _CPUFREQ_GOVERNOR_H */
diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c
new file mode 100644
index 000000000000..52841f807a7e
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_governor_attr_set.c
@@ -0,0 +1,84 @@
+/*
+ * Abstract code for CPUFreq governor tunable sysfs attributes.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "cpufreq_governor.h"
+
+static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
+{
+ return container_of(kobj, struct gov_attr_set, kobj);
+}
+
+static inline struct governor_attr *to_gov_attr(struct attribute *attr)
+{
+ return container_of(attr, struct governor_attr, attr);
+}
+
+static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct governor_attr *gattr = to_gov_attr(attr);
+
+ return gattr->show(to_gov_attr_set(kobj), buf);
+}
+
+static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gov_attr_set *attr_set = to_gov_attr_set(kobj);
+ struct governor_attr *gattr = to_gov_attr(attr);
+ int ret;
+
+ mutex_lock(&attr_set->update_lock);
+ ret = attr_set->usage_count ? gattr->store(attr_set, buf, count) : -EBUSY;
+ mutex_unlock(&attr_set->update_lock);
+ return ret;
+}
+
+const struct sysfs_ops governor_sysfs_ops = {
+ .show = governor_show,
+ .store = governor_store,
+};
+EXPORT_SYMBOL_GPL(governor_sysfs_ops);
+
+void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+ INIT_LIST_HEAD(&attr_set->policy_list);
+ mutex_init(&attr_set->update_lock);
+ attr_set->usage_count = 1;
+ list_add(list_node, &attr_set->policy_list);
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_init);
+
+void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+ mutex_lock(&attr_set->update_lock);
+ attr_set->usage_count++;
+ list_add(list_node, &attr_set->policy_list);
+ mutex_unlock(&attr_set->update_lock);
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_get);
+
+unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+ unsigned int count;
+
+ mutex_lock(&attr_set->update_lock);
+ list_del(list_node);
+ count = --attr_set->usage_count;
+ mutex_unlock(&attr_set->update_lock);
+ if (count)
+ return count;
+
+ kobject_put(&attr_set->kobj);
+ mutex_destroy(&attr_set->update_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_put);
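A hedged sketch of the lifecycle these helpers implement, seen from a governor embedding the set in its tunables object; struct my_tunables and the attach/detach wrappers are hypothetical and simply mirror what cpufreq_governor.c does above with struct dbs_data.

#include "cpufreq_governor.h"

/* Hypothetical tunables container embedding the shared attribute set. */
struct my_tunables {
	struct gov_attr_set attr_set;
	unsigned int threshold;
};

static void my_gov_attach(struct my_tunables *t, struct list_head *node,
			  bool first_user)
{
	if (first_user)
		gov_attr_set_init(&t->attr_set, node);	/* usage_count = 1 */
	else
		gov_attr_set_get(&t->attr_set, node);	/* usage_count++ */
}

static void my_gov_detach(struct my_tunables *t, struct list_head *node)
{
	/* gov_attr_set_put() drops the kobject once the last user is gone. */
	if (!gov_attr_set_put(&t->attr_set, node))
		kfree(t);
}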
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index acd80272ded6..300163430516 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -207,9 +207,10 @@ static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;
-static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
+ size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
@@ -224,9 +225,10 @@ static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
return count;
}
-static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -240,9 +242,10 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
return count;
}
-static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
- const char *buf, size_t count)
+static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct policy_dbs_info *policy_dbs;
unsigned int input;
int ret;
@@ -254,7 +257,7 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
dbs_data->sampling_down_factor = input;
/* Reset down sampling multiplier in case it was active */
- list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+ list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
/*
* Doing this without locking might lead to using different
* rate_mult values in od_update() and od_dbs_timer().
@@ -267,9 +270,10 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
return count;
}
-static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
- const char *buf, size_t count)
+static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
@@ -291,9 +295,10 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
return count;
}
-static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_powersave_bias(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
struct policy_dbs_info *policy_dbs;
unsigned int input;
@@ -308,7 +313,7 @@ static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
od_tuners->powersave_bias = input;
- list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list)
+ list_for_each_entry(policy_dbs, &attr_set->policy_list, list)
ondemand_powersave_bias_init(policy_dbs->policy);
return count;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 4d16f45ee1da..9f3dec9a3f36 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
static DEFINE_MUTEX(userspace_mutex);
@@ -31,6 +32,7 @@ static DEFINE_MUTEX(userspace_mutex);
static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
{
int ret = -EINVAL;
+ unsigned int *setspeed = policy->governor_data;
pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
@@ -38,6 +40,8 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
if (!per_cpu(cpu_is_managed, policy->cpu))
goto err;
+ *setspeed = freq;
+
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
err:
mutex_unlock(&userspace_mutex);
@@ -49,19 +53,45 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
return sprintf(buf, "%u\n", policy->cur);
}
+static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
+{
+ unsigned int *setspeed;
+
+ setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
+ if (!setspeed)
+ return -ENOMEM;
+
+ policy->governor_data = setspeed;
+ return 0;
+}
+
static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
unsigned int event)
{
+ unsigned int *setspeed = policy->governor_data;
unsigned int cpu = policy->cpu;
int rc = 0;
+ if (event == CPUFREQ_GOV_POLICY_INIT)
+ return cpufreq_userspace_policy_init(policy);
+
+ if (!setspeed)
+ return -EINVAL;
+
switch (event) {
+ case CPUFREQ_GOV_POLICY_EXIT:
+ mutex_lock(&userspace_mutex);
+ policy->governor_data = NULL;
+ kfree(setspeed);
+ mutex_unlock(&userspace_mutex);
+ break;
case CPUFREQ_GOV_START:
BUG_ON(!policy->cur);
pr_debug("started managing cpu %u\n", cpu);
mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, cpu) = 1;
+ *setspeed = policy->cur;
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_STOP:
@@ -69,20 +99,23 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, cpu) = 0;
+ *setspeed = 0;
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_LIMITS:
mutex_lock(&userspace_mutex);
- pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
- cpu, policy->min, policy->max,
- policy->cur);
+ pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
+ cpu, policy->min, policy->max, policy->cur, *setspeed);
- if (policy->max < policy->cur)
+ if (policy->max < *setspeed)
__cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
- else if (policy->min > policy->cur)
+ else if (policy->min > *setspeed)
__cpufreq_driver_target(policy, policy->min,
CPUFREQ_RELATION_L);
+ else
+ __cpufreq_driver_target(policy, *setspeed,
+ CPUFREQ_RELATION_L);
mutex_unlock(&userspace_mutex);
break;
}
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 4085244c8a67..cdf097b29862 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -6,6 +6,8 @@
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -20,7 +22,7 @@
#include <asm/msr.h>
#include <asm/tsc.h>
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
#include <linux/acpi.h>
#include <acpi/processor.h>
#endif
@@ -33,7 +35,7 @@
struct eps_cpu_data {
u32 fsb;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
u32 bios_limit;
#endif
struct cpufreq_frequency_table freq_table[];
@@ -46,7 +48,7 @@ static int freq_failsafe_off;
static int voltage_failsafe_off;
static int set_max_voltage;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
static int ignore_acpi_limit;
static struct acpi_processor_performance *eps_acpi_cpu_perf;
@@ -141,11 +143,9 @@ static int eps_set_state(struct eps_cpu_data *centaur,
/* Print voltage and multiplier */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
current_voltage = lo & 0xff;
- printk(KERN_INFO "eps: Current voltage = %dmV\n",
- current_voltage * 16 + 700);
+ pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700);
current_multiplier = (lo >> 8) & 0xff;
- printk(KERN_INFO "eps: Current multiplier = %d\n",
- current_multiplier);
+ pr_info("Current multiplier = %d\n", current_multiplier);
}
#endif
return 0;
@@ -166,7 +166,7 @@ static int eps_target(struct cpufreq_policy *policy, unsigned int index)
dest_state = centaur->freq_table[index].driver_data & 0xffff;
ret = eps_set_state(centaur, policy, dest_state);
if (ret)
- printk(KERN_ERR "eps: Timeout!\n");
+ pr_err("Timeout!\n");
return ret;
}
@@ -186,7 +186,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
int k, step, voltage;
int ret;
int states;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
unsigned int limit;
#endif
@@ -194,36 +194,36 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
/* Check brand */
- printk(KERN_INFO "eps: Detected VIA ");
+ pr_info("Detected VIA ");
switch (c->x86_model) {
case 10:
rdmsr(0x1153, lo, hi);
brand = (((lo >> 2) ^ lo) >> 18) & 3;
- printk(KERN_CONT "Model A ");
+ pr_cont("Model A ");
break;
case 13:
rdmsr(0x1154, lo, hi);
brand = (((lo >> 4) ^ (lo >> 2))) & 0x000000ff;
- printk(KERN_CONT "Model D ");
+ pr_cont("Model D ");
break;
}
switch (brand) {
case EPS_BRAND_C7M:
- printk(KERN_CONT "C7-M\n");
+ pr_cont("C7-M\n");
break;
case EPS_BRAND_C7:
- printk(KERN_CONT "C7\n");
+ pr_cont("C7\n");
break;
case EPS_BRAND_EDEN:
- printk(KERN_CONT "Eden\n");
+ pr_cont("Eden\n");
break;
case EPS_BRAND_C7D:
- printk(KERN_CONT "C7-D\n");
+ pr_cont("C7-D\n");
break;
case EPS_BRAND_C3:
- printk(KERN_CONT "C3\n");
+ pr_cont("C3\n");
return -ENODEV;
break;
}
@@ -235,7 +235,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Can be locked at 0 */
rdmsrl(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
- printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
+ pr_info("Can't enable Enhanced PowerSaver\n");
return -ENODEV;
}
}
@@ -243,22 +243,19 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Print voltage and multiplier */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
current_voltage = lo & 0xff;
- printk(KERN_INFO "eps: Current voltage = %dmV\n",
- current_voltage * 16 + 700);
+ pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700);
current_multiplier = (lo >> 8) & 0xff;
- printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier);
+ pr_info("Current multiplier = %d\n", current_multiplier);
/* Print limits */
max_voltage = hi & 0xff;
- printk(KERN_INFO "eps: Highest voltage = %dmV\n",
- max_voltage * 16 + 700);
+ pr_info("Highest voltage = %dmV\n", max_voltage * 16 + 700);
max_multiplier = (hi >> 8) & 0xff;
- printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier);
+ pr_info("Highest multiplier = %d\n", max_multiplier);
min_voltage = (hi >> 16) & 0xff;
- printk(KERN_INFO "eps: Lowest voltage = %dmV\n",
- min_voltage * 16 + 700);
+ pr_info("Lowest voltage = %dmV\n", min_voltage * 16 + 700);
min_multiplier = (hi >> 24) & 0xff;
- printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier);
+ pr_info("Lowest multiplier = %d\n", min_multiplier);
/* Sanity checks */
if (current_multiplier == 0 || max_multiplier == 0
@@ -276,34 +273,30 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Check for systems using underclocked CPU */
if (!freq_failsafe_off && max_multiplier != current_multiplier) {
- printk(KERN_INFO "eps: Your processor is running at different "
- "frequency then its maximum. Aborting.\n");
- printk(KERN_INFO "eps: You can use freq_failsafe_off option "
- "to disable this check.\n");
+		pr_info("Your processor is running at a different frequency than its maximum. Aborting.\n");
+		pr_info("You can use the freq_failsafe_off option to disable this check.\n");
return -EINVAL;
}
if (!voltage_failsafe_off && max_voltage != current_voltage) {
- printk(KERN_INFO "eps: Your processor is running at different "
- "voltage then its maximum. Aborting.\n");
- printk(KERN_INFO "eps: You can use voltage_failsafe_off "
- "option to disable this check.\n");
+		pr_info("Your processor is running at a different voltage than its maximum. Aborting.\n");
+		pr_info("You can use the voltage_failsafe_off option to disable this check.\n");
return -EINVAL;
}
/* Calc FSB speed */
fsb = cpu_khz / current_multiplier;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
/* Check for ACPI processor speed limit */
if (!ignore_acpi_limit && !eps_acpi_init()) {
if (!acpi_processor_get_bios_limit(policy->cpu, &limit)) {
- printk(KERN_INFO "eps: ACPI limit %u.%uGHz\n",
+ pr_info("ACPI limit %u.%uGHz\n",
limit/1000000,
(limit%1000000)/10000);
eps_acpi_exit(policy);
/* Check if max_multiplier is in BIOS limits */
if (limit && max_multiplier * fsb > limit) {
- printk(KERN_INFO "eps: Aborting.\n");
+ pr_info("Aborting\n");
return -EINVAL;
}
}
@@ -319,8 +312,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
v = (set_max_voltage - 700) / 16;
/* Check if voltage is within limits */
if (v >= min_voltage && v <= max_voltage) {
- printk(KERN_INFO "eps: Setting %dmV as maximum.\n",
- v * 16 + 700);
+ pr_info("Setting %dmV as maximum\n", v * 16 + 700);
max_voltage = v;
}
}
@@ -341,7 +333,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Copy basic values */
centaur->fsb = fsb;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
centaur->bios_limit = limit;
#endif
@@ -426,7 +418,7 @@ module_param(freq_failsafe_off, int, 0644);
MODULE_PARM_DESC(freq_failsafe_off, "Disable current vs max frequency check");
module_param(voltage_failsafe_off, int, 0644);
MODULE_PARM_DESC(voltage_failsafe_off, "Disable current vs max voltage check");
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
module_param(ignore_acpi_limit, int, 0644);
MODULE_PARM_DESC(ignore_acpi_limit, "Don't check ACPI's processor speed limit");
#endif
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 1c06e786c9ba..bfce11cba1df 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -16,6 +16,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -185,7 +187,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
static int __init elanfreq_setup(char *str)
{
max_freq = simple_strtoul(str, &str, 0);
- printk(KERN_WARNING "You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
+ pr_warn("You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
return 1;
}
__setup("elanfreq=", elanfreq_setup);
diff --git a/drivers/cpufreq/hisi-acpu-cpufreq.c b/drivers/cpufreq/hisi-acpu-cpufreq.c
deleted file mode 100644
index 026d5b2224de..000000000000
--- a/drivers/cpufreq/hisi-acpu-cpufreq.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Hisilicon Platforms Using ACPU CPUFreq Support
- *
- * Copyright (c) 2015 Hisilicon Limited.
- * Copyright (c) 2015 Linaro Limited.
- *
- * Leo Yan <leo.yan@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-
-static int __init hisi_acpu_cpufreq_driver_init(void)
-{
- struct platform_device *pdev;
-
- if (!of_machine_is_compatible("hisilicon,hi6220"))
- return -ENODEV;
-
- pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
- return PTR_ERR_OR_ZERO(pdev);
-}
-module_init(hisi_acpu_cpufreq_driver_init);
-
-MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
-MODULE_DESCRIPTION("Hisilicon acpu cpufreq driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index 0202429f1c5b..759612da4fdc 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -8,6 +8,8 @@
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -118,8 +120,7 @@ processor_get_freq (
if (ret) {
set_cpus_allowed_ptr(current, &saved_mask);
- printk(KERN_WARNING "get performance failed with error %d\n",
- ret);
+ pr_warn("get performance failed with error %d\n", ret);
ret = 0;
goto migrate_end;
}
@@ -177,7 +178,7 @@ processor_set_freq (
ret = processor_set_pstate(value);
if (ret) {
- printk(KERN_WARNING "Transition failed with error %d\n", ret);
+ pr_warn("Transition failed with error %d\n", ret);
retval = -ENODEV;
goto migrate_end;
}
@@ -291,8 +292,7 @@ acpi_cpufreq_cpu_init (
/* notify BIOS that we exist */
acpi_processor_notify_smm(THIS_MODULE);
- printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
- "activated.\n", cpu);
+ pr_info("CPU%u - ACPI performance management activated\n", cpu);
for (i = 0; i < data->acpi_data.state_count; i++)
pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b230ebaae66c..b76a98dd9988 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -10,6 +10,8 @@
* of the License.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
@@ -39,10 +41,17 @@
#define ATOM_TURBO_RATIOS 0x66c
#define ATOM_TURBO_VIDS 0x66d
+#ifdef CONFIG_ACPI
+#include <acpi/processor.h>
+#endif
+
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
+#define EXT_BITS 6
+#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
+
static inline int32_t mul_fp(int32_t x, int32_t y)
{
return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
@@ -64,12 +73,22 @@ static inline int ceiling_fp(int32_t x)
return ret;
}
+static inline u64 mul_ext_fp(u64 x, u64 y)
+{
+ return (x * y) >> EXT_FRAC_BITS;
+}
+
+static inline u64 div_ext_fp(u64 x, u64 y)
+{
+ return div64_u64(x << EXT_FRAC_BITS, y);
+}
+
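A quick worked example of the widened fixed-point helpers (illustrative only, not part of the driver): with EXT_FRAC_BITS == 14, the ratio 1.5 is represented as 1.5 * (1 << 14) == 24576, which is how an APERF/MPERF average can later be combined with a frequency.

/* Illustrative use of the EXT_FRAC_BITS fixed-point helpers. */
static void ext_fp_example(void)
{
	u64 ratio = div_ext_fp(3, 2);		/* 1.5 -> 24576 */
	u64 khz = mul_ext_fp(ratio, 2300000);	/* 1.5 * 2.3 GHz -> 3450000 kHz */

	pr_debug("ratio=%llu freq=%llu kHz\n", ratio, khz);
}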
/**
* struct sample - Store performance sample
- * @core_pct_busy: Ratio of APERF/MPERF in percent, which is actual
+ * @core_avg_perf: Ratio of APERF/MPERF which is the actual average
* performance during last sample period
* @busy_scaled: Scaled busy value which is used to calculate next
- * P state. This can be different than core_pct_busy
+ * P state. This can be different than core_avg_perf
* to account for cpu idle period
* @aperf: Difference of actual performance frequency clock count
* read from APERF MSR between last and current sample
@@ -84,7 +103,7 @@ static inline int ceiling_fp(int32_t x)
* data for choosing next P State.
*/
struct sample {
- int32_t core_pct_busy;
+ int32_t core_avg_perf;
int32_t busy_scaled;
u64 aperf;
u64 mperf;
@@ -162,6 +181,7 @@ struct _pid {
* struct cpudata - Per CPU instance data storage
* @cpu: CPU number for this instance data
* @update_util: CPUFreq utility callback information
+ * @update_util_set: CPUFreq utility callback is set
* @pstate: Stores P state limits for this CPU
* @vid: Stores VID limits for this CPU
* @pid: Stores PID parameters for this CPU
@@ -172,6 +192,8 @@ struct _pid {
* @prev_cummulative_iowait: IO Wait time difference from last and
* current sample
* @sample: Storage for storing last Sample data
+ * @acpi_perf_data: Stores ACPI perf information read from _PSS
+ * @valid_pss_table: Set to true for valid ACPI _PSS entries found
*
* This structure stores per CPU instance data for all CPUs.
*/
@@ -179,6 +201,7 @@ struct cpudata {
int cpu;
struct update_util_data update_util;
+ bool update_util_set;
struct pstate_data pstate;
struct vid_data vid;
@@ -190,6 +213,10 @@ struct cpudata {
u64 prev_tsc;
u64 prev_cummulative_iowait;
struct sample sample;
+#ifdef CONFIG_ACPI
+ struct acpi_processor_performance acpi_perf_data;
+ bool valid_pss_table;
+#endif
};
static struct cpudata **all_cpu_data;
@@ -258,6 +285,9 @@ static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;
+#ifdef CONFIG_ACPI
+static bool acpi_ppc;
+#endif
/**
* struct perf_limits - Store user and policy limits
@@ -331,6 +361,124 @@ static struct perf_limits *limits = &performance_limits;
static struct perf_limits *limits = &powersave_limits;
#endif
+#ifdef CONFIG_ACPI
+
+static bool intel_pstate_get_ppc_enable_status(void)
+{
+ if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
+ acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
+ return true;
+
+ return acpi_ppc;
+}
+
+/*
+ * The max target pstate ratio is an 8 bit value in both the PLATFORM_INFO MSR
+ * and the TURBO_RATIO_LIMIT MSR, which the pstate driver stores in its
+ * max_pstate and max_turbo_pstate fields. The PERF_CTL MSR contains a 16 bit
+ * value for the P-state ratio, of which only the high 8 bits are used. For
+ * example, 0x1700 sets target ratio 0x17. The _PSS control value is stored in
+ * a format which can be written directly to the PERF_CTL MSR, but in the
+ * intel_pstate driver this shift occurs during the write to PERF_CTL
+ * (e.g. core_set_pstate() for cores). This function converts the _PSS control
+ * value to the intel_pstate driver format for comparison and assignment.
+ */
+static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
+{
+ return cpu->acpi_perf_data.states[index].control >> 8;
+}
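As a worked example of the conversion above: a hypothetical _PSS control value of 0x1700, which could be written to PERF_CTL as-is, becomes 0x1700 >> 8 = 0x17, i.e. P-state ratio 23 in the driver's native max_pstate/max_turbo_pstate representation.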
+
+static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu;
+ int turbo_pss_ctl;
+ int ret;
+ int i;
+
+ if (hwp_active)
+ return;
+
+ if (!intel_pstate_get_ppc_enable_status())
+ return;
+
+ cpu = all_cpu_data[policy->cpu];
+
+ ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
+ policy->cpu);
+ if (ret)
+ return;
+
+ /*
+ * Check if the control value in _PSS is for PERF_CTL MSR, which should
+ * guarantee that the states returned by it map to the states in our
+ * list directly.
+ */
+ if (cpu->acpi_perf_data.control_register.space_id !=
+ ACPI_ADR_SPACE_FIXED_HARDWARE)
+ goto err;
+
+ /*
+	 * If there is only one entry in _PSS, simply ignore _PSS and continue as
+ * usual without taking _PSS into account
+ */
+ if (cpu->acpi_perf_data.state_count < 2)
+ goto err;
+
+ pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
+ for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
+ pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
+ (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
+ (u32) cpu->acpi_perf_data.states[i].core_frequency,
+ (u32) cpu->acpi_perf_data.states[i].power,
+ (u32) cpu->acpi_perf_data.states[i].control);
+ }
+
+ /*
+	 * The _PSS table doesn't contain the whole turbo frequency range.
+	 * It only contains +1 MHz above the max non-turbo frequency,
+	 * with the control value corresponding to the max turbo ratio. But
+	 * when cpufreq set_policy is called, it will be called with this
+	 * max frequency, which causes reduced performance, since this
+	 * driver uses the real max turbo frequency as the max
+	 * frequency. So correct this frequency in the _PSS table to the
+	 * real max turbo frequency based on the turbo ratio.
+	 * Also convert to MHz, as _PSS frequencies are in MHz.
+ */
+ turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
+ if (turbo_pss_ctl > cpu->pstate.max_pstate)
+ cpu->acpi_perf_data.states[0].core_frequency =
+ policy->cpuinfo.max_freq / 1000;
+ cpu->valid_pss_table = true;
+ pr_info("_PPC limits will be enforced\n");
+
+ return;
+
+ err:
+ cpu->valid_pss_table = false;
+ acpi_processor_unregister_performance(policy->cpu);
+}
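To illustrate the turbo fix-up with made-up numbers: if the first _PSS entry reported 2301 MHz with a control ratio of 0x22 (34), while max_pstate were 0x21 (33) and cpuinfo.max_freq were 3300000 kHz, then turbo_pss_ctl (34) would exceed max_pstate, so states[0].core_frequency would be rewritten to 3300000 / 1000 = 3300 MHz before _PPC limits are applied against the table.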
+
+static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu;
+
+ cpu = all_cpu_data[policy->cpu];
+ if (!cpu->valid_pss_table)
+ return;
+
+ acpi_processor_unregister_performance(policy->cpu);
+}
+
+#else
+static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+{
+}
+
+static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+}
+#endif
+
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
int deadband, int integral) {
pid->setpoint = int_tofp(setpoint);
@@ -341,17 +489,17 @@ static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
- pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
+ pid->p_gain = div_fp(percent, 100);
}
static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
- pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
+ pid->i_gain = div_fp(percent, 100);
}
static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
- pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
+ pid->d_gain = div_fp(percent, 100);
}
static signed int pid_calc(struct _pid *pid, int32_t busy)
@@ -537,7 +685,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
- turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
+ turbo_fp = div_fp(no_turbo, total);
turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
return sprintf(buf, "%u\n", turbo_pct);
}
@@ -579,7 +727,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
update_turbo_state();
if (limits->turbo_disabled) {
- pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
+ pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
return -EPERM;
}
@@ -608,8 +756,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
limits->max_perf_pct);
limits->max_perf_pct = max(limits->min_perf_pct,
limits->max_perf_pct);
- limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
- int_tofp(100));
+ limits->max_perf = div_fp(limits->max_perf_pct, 100);
if (hwp_active)
intel_pstate_hwp_set_online_cpus();
@@ -633,8 +780,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
limits->min_perf_pct);
limits->min_perf_pct = min(limits->max_perf_pct,
limits->min_perf_pct);
- limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
- int_tofp(100));
+ limits->min_perf = div_fp(limits->min_perf_pct, 100);
if (hwp_active)
intel_pstate_hwp_set_online_cpus();
@@ -1019,15 +1165,11 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
intel_pstate_set_min_pstate(cpu);
}
-static inline void intel_pstate_calc_busy(struct cpudata *cpu)
+static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
- int64_t core_pct;
- core_pct = int_tofp(sample->aperf) * int_tofp(100);
- core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
-
- sample->core_pct_busy = (int32_t)core_pct;
+ sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}
static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
@@ -1070,9 +1212,14 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
- return fp_toint(mul_fp(cpu->sample.core_pct_busy,
- int_tofp(cpu->pstate.max_pstate_physical *
- cpu->pstate.scaling / 100)));
+ return mul_ext_fp(cpu->sample.core_avg_perf,
+ cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
+}
+
+static inline int32_t get_avg_pstate(struct cpudata *cpu)
+{
+ return mul_ext_fp(cpu->pstate.max_pstate_physical,
+ cpu->sample.core_avg_perf);
}
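For a hypothetical sample in which core_avg_perf encodes 0.75 (12288 with 14 fractional bits), max_pstate_physical is 30 and scaling is assumed to be 100000 kHz per ratio unit, get_avg_frequency() returns mul_ext_fp(12288, 30 * 100000) = (12288 * 3000000) >> 14 = 2250000 kHz, and get_avg_pstate() returns mul_ext_fp(30, 12288) = 22.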
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
@@ -1107,49 +1254,43 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
cpu->sample.busy_scaled = cpu_load;
- return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load);
+ return get_avg_pstate(cpu) - pid_calc(&cpu->pid, cpu_load);
}
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
- int32_t core_busy, max_pstate, current_pstate, sample_ratio;
+ int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
u64 duration_ns;
/*
- * core_busy is the ratio of actual performance to max
- * max_pstate is the max non turbo pstate available
- * current_pstate was the pstate that was requested during
- * the last sample period.
- *
- * We normalize core_busy, which was our actual percent
- * performance to what we requested during the last sample
- * period. The result will be a percentage of busy at a
- * specified pstate.
+ * perf_scaled is the average performance during the last sampling
+ * period scaled by the ratio of the maximum P-state to the P-state
+ * requested last time (in percent). That measures the system's
+ * response to the previous P-state selection.
*/
- core_busy = cpu->sample.core_pct_busy;
- max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
- current_pstate = int_tofp(cpu->pstate.current_pstate);
- core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+ max_pstate = cpu->pstate.max_pstate_physical;
+ current_pstate = cpu->pstate.current_pstate;
+ perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
+ div_fp(100 * max_pstate, current_pstate));
/*
* Since our utilization update callback will not run unless we are
* in C0, check if the actual elapsed time is significantly greater (3x)
* than our sample interval. If it is, then we were idle for a long
- * enough period of time to adjust our busyness.
+ * enough period of time to adjust our performance metric.
*/
duration_ns = cpu->sample.time - cpu->last_sample_time;
if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
- sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
- int_tofp(duration_ns));
- core_busy = mul_fp(core_busy, sample_ratio);
+ sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
+ perf_scaled = mul_fp(perf_scaled, sample_ratio);
} else {
sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
if (sample_ratio < int_tofp(1))
- core_busy = 0;
+ perf_scaled = 0;
}
- cpu->sample.busy_scaled = core_busy;
- return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
+ cpu->sample.busy_scaled = perf_scaled;
+ return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
}
static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
@@ -1179,7 +1320,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
intel_pstate_update_pstate(cpu, target_pstate);
sample = &cpu->sample;
- trace_pstate_sample(fp_toint(sample->core_pct_busy),
+ trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
fp_toint(sample->busy_scaled),
from,
cpu->pstate.current_pstate,
@@ -1199,7 +1340,7 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
bool sample_taken = intel_pstate_sample(cpu, time);
if (sample_taken) {
- intel_pstate_calc_busy(cpu);
+ intel_pstate_calc_avg_perf(cpu);
if (!hwp_active)
intel_pstate_adjust_busy_pstate(cpu);
}
@@ -1261,23 +1402,16 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
intel_pstate_busy_pid_reset(cpu);
- cpu->update_util.func = intel_pstate_update_util;
-
- pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
+ pr_debug("controlling: cpu %d\n", cpunum);
return 0;
}
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
- struct sample *sample;
- struct cpudata *cpu;
+ struct cpudata *cpu = all_cpu_data[cpu_num];
- cpu = all_cpu_data[cpu_num];
- if (!cpu)
- return 0;
- sample = &cpu->sample;
- return get_avg_frequency(cpu);
+ return cpu ? get_avg_frequency(cpu) : 0;
}
static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
@@ -1286,12 +1420,20 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
/* Prevent intel_pstate_update_util() from using stale data. */
cpu->sample.time = 0;
- cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
+ cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
+ intel_pstate_update_util);
+ cpu->update_util_set = true;
}
static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
- cpufreq_set_update_util_data(cpu, NULL);
+ struct cpudata *cpu_data = all_cpu_data[cpu];
+
+ if (!cpu_data->update_util_set)
+ return;
+
+ cpufreq_remove_update_util_hook(cpu);
+ cpu_data->update_util_set = false;
synchronize_sched();
}
@@ -1311,20 +1453,31 @@ static void intel_pstate_set_performance_limits(struct perf_limits *limits)
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
+ struct cpudata *cpu;
+
if (!policy->cpuinfo.max_freq)
return -ENODEV;
intel_pstate_clear_update_util_hook(policy->cpu);
+ cpu = all_cpu_data[0];
+ if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate) {
+ if (policy->max < policy->cpuinfo.max_freq &&
+ policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
+ pr_debug("policy->max > max non turbo frequency\n");
+ policy->max = policy->cpuinfo.max_freq;
+ }
+ }
+
if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
limits = &performance_limits;
if (policy->max >= policy->cpuinfo.max_freq) {
- pr_debug("intel_pstate: set performance\n");
+ pr_debug("set performance\n");
intel_pstate_set_performance_limits(limits);
goto out;
}
} else {
- pr_debug("intel_pstate: set powersave\n");
+ pr_debug("set powersave\n");
limits = &powersave_limits;
}
@@ -1348,10 +1501,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
/* Make sure min_perf_pct <= max_perf_pct */
limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
- limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
- int_tofp(100));
- limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
- int_tofp(100));
+ limits->min_perf = div_fp(limits->min_perf_pct, 100);
+ limits->max_perf = div_fp(limits->max_perf_pct, 100);
out:
intel_pstate_set_update_util_hook(policy->cpu);
@@ -1377,7 +1528,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
int cpu_num = policy->cpu;
struct cpudata *cpu = all_cpu_data[cpu_num];
- pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
+ pr_debug("CPU %d exiting\n", cpu_num);
intel_pstate_clear_update_util_hook(cpu_num);
@@ -1410,12 +1561,20 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
policy->cpuinfo.max_freq =
cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ intel_pstate_init_acpi_perf_limits(policy);
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
cpumask_set_cpu(policy->cpu, policy->cpus);
return 0;
}
+static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+ intel_pstate_exit_perf_limits(policy);
+
+ return 0;
+}
+
static struct cpufreq_driver intel_pstate_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = intel_pstate_verify_policy,
@@ -1423,6 +1582,7 @@ static struct cpufreq_driver intel_pstate_driver = {
.resume = intel_pstate_hwp_set_policy,
.get = intel_pstate_get,
.init = intel_pstate_cpu_init,
+ .exit = intel_pstate_cpu_exit,
.stop_cpu = intel_pstate_stop_cpu,
.name = "intel_pstate",
};
@@ -1466,8 +1626,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
}
-#if IS_ENABLED(CONFIG_ACPI)
-#include <acpi/processor.h>
+#ifdef CONFIG_ACPI
static bool intel_pstate_no_acpi_pss(void)
{
@@ -1623,7 +1782,7 @@ hwp_cpu_matched:
if (intel_pstate_platform_pwr_mgmt_exists())
return -ENODEV;
- pr_info("Intel P-state driver initializing.\n");
+ pr_info("Intel P-state driver initializing\n");
all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
if (!all_cpu_data)
@@ -1640,7 +1799,7 @@ hwp_cpu_matched:
intel_pstate_sysfs_expose_params();
if (hwp_active)
- pr_info("intel_pstate: HWP enabled\n");
+ pr_info("HWP enabled\n");
return rc;
out:
@@ -1666,13 +1825,19 @@ static int __init intel_pstate_setup(char *str)
if (!strcmp(str, "disable"))
no_load = 1;
if (!strcmp(str, "no_hwp")) {
- pr_info("intel_pstate: HWP disabled\n");
+ pr_info("HWP disabled\n");
no_hwp = 1;
}
if (!strcmp(str, "force"))
force_load = 1;
if (!strcmp(str, "hwp_only"))
hwp_only = 1;
+
+#ifdef CONFIG_ACPI
+ if (!strcmp(str, "support_acpi_ppc"))
+ acpi_ppc = true;
+#endif
+
return 0;
}
early_param("intel_pstate", intel_pstate_setup);
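Assuming the usual early_param() semantics, the new _PPC support added above would be enabled by booting with intel_pstate=support_acpi_ppc on the kernel command line (CONFIG_ACPI builds only), alongside the existing disable, no_hwp, force and hwp_only keywords.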
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 0f6b229afcb9..c46a12df40dd 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -21,6 +21,8 @@
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -40,8 +42,6 @@
#include "longhaul.h"
-#define PFX "longhaul: "
-
#define TYPE_LONGHAUL_V1 1
#define TYPE_LONGHAUL_V2 2
#define TYPE_POWERSAVER 3
@@ -347,14 +347,13 @@ retry_loop:
freqs.new = calc_speed(longhaul_get_cpu_mult());
/* Check if requested frequency is set. */
if (unlikely(freqs.new != speed)) {
- printk(KERN_INFO PFX "Failed to set requested frequency!\n");
+ pr_info("Failed to set requested frequency!\n");
/* Revision ID = 1 but processor is expecting revision key
* equal to 0. Jumpers at the bottom of processor will change
* multiplier and FSB, but will not change bits in Longhaul
* MSR nor enable voltage scaling. */
if (!revid_errata) {
- printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" "
- "option.\n");
+ pr_info("Enabling \"Ignore Revision ID\" option\n");
revid_errata = 1;
msleep(200);
goto retry_loop;
@@ -364,11 +363,10 @@ retry_loop:
* but it doesn't change frequency. I tried poking various
* bits in northbridge registers, but without success. */
if (longhaul_flags & USE_ACPI_C3) {
- printk(KERN_INFO PFX "Disabling ACPI C3 support.\n");
+ pr_info("Disabling ACPI C3 support\n");
longhaul_flags &= ~USE_ACPI_C3;
if (revid_errata) {
- printk(KERN_INFO PFX "Disabling \"Ignore "
- "Revision ID\" option.\n");
+ pr_info("Disabling \"Ignore Revision ID\" option\n");
revid_errata = 0;
}
msleep(200);
@@ -379,7 +377,7 @@ retry_loop:
* RevID = 1. RevID errata will make things right. Just
* to be 100% sure. */
if (longhaul_version == TYPE_LONGHAUL_V2) {
- printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n");
+ pr_info("Switching to Longhaul ver. 1\n");
longhaul_version = TYPE_LONGHAUL_V1;
msleep(200);
goto retry_loop;
@@ -387,8 +385,7 @@ retry_loop:
}
if (!bm_timeout) {
- printk(KERN_INFO PFX "Warning: Timeout while waiting for "
- "idle PCI bus.\n");
+ pr_info("Warning: Timeout while waiting for idle PCI bus\n");
return -EBUSY;
}
@@ -433,12 +430,12 @@ static int longhaul_get_ranges(void)
/* Get current frequency */
mult = longhaul_get_cpu_mult();
if (mult == -1) {
- printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n");
+ pr_info("Invalid (reserved) multiplier!\n");
return -EINVAL;
}
fsb = guess_fsb(mult);
if (fsb == 0) {
- printk(KERN_INFO PFX "Invalid (reserved) FSB!\n");
+ pr_info("Invalid (reserved) FSB!\n");
return -EINVAL;
}
/* Get max multiplier - as we always did.
@@ -468,11 +465,11 @@ static int longhaul_get_ranges(void)
print_speed(highest_speed/1000));
if (lowest_speed == highest_speed) {
- printk(KERN_INFO PFX "highestspeed == lowest, aborting.\n");
+ pr_info("highestspeed == lowest, aborting\n");
return -EINVAL;
}
if (lowest_speed > highest_speed) {
- printk(KERN_INFO PFX "nonsense! lowest (%d > %d) !\n",
+ pr_info("nonsense! lowest (%d > %d) !\n",
lowest_speed, highest_speed);
return -EINVAL;
}
@@ -538,16 +535,16 @@ static void longhaul_setup_voltagescaling(void)
rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
if (!(longhaul.bits.RevisionID & 1)) {
- printk(KERN_INFO PFX "Voltage scaling not supported by CPU.\n");
+ pr_info("Voltage scaling not supported by CPU\n");
return;
}
if (!longhaul.bits.VRMRev) {
- printk(KERN_INFO PFX "VRM 8.5\n");
+ pr_info("VRM 8.5\n");
vrm_mV_table = &vrm85_mV[0];
mV_vrm_table = &mV_vrm85[0];
} else {
- printk(KERN_INFO PFX "Mobile VRM\n");
+ pr_info("Mobile VRM\n");
if (cpu_model < CPU_NEHEMIAH)
return;
vrm_mV_table = &mobilevrm_mV[0];
@@ -558,27 +555,21 @@ static void longhaul_setup_voltagescaling(void)
maxvid = vrm_mV_table[longhaul.bits.MaximumVID];
if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) {
- printk(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. "
- "Voltage scaling disabled.\n",
- minvid.mV/1000, minvid.mV%1000,
- maxvid.mV/1000, maxvid.mV%1000);
+ pr_info("Bogus values Min:%d.%03d Max:%d.%03d - Voltage scaling disabled\n",
+ minvid.mV/1000, minvid.mV%1000,
+ maxvid.mV/1000, maxvid.mV%1000);
return;
}
if (minvid.mV == maxvid.mV) {
- printk(KERN_INFO PFX "Claims to support voltage scaling but "
- "min & max are both %d.%03d. "
- "Voltage scaling disabled\n",
- maxvid.mV/1000, maxvid.mV%1000);
+ pr_info("Claims to support voltage scaling but min & max are both %d.%03d - Voltage scaling disabled\n",
+ maxvid.mV/1000, maxvid.mV%1000);
return;
}
/* How many voltage steps*/
numvscales = maxvid.pos - minvid.pos + 1;
- printk(KERN_INFO PFX
- "Max VID=%d.%03d "
- "Min VID=%d.%03d, "
- "%d possible voltage scales\n",
+ pr_info("Max VID=%d.%03d Min VID=%d.%03d, %d possible voltage scales\n",
maxvid.mV/1000, maxvid.mV%1000,
minvid.mV/1000, minvid.mV%1000,
numvscales);
@@ -617,12 +608,12 @@ static void longhaul_setup_voltagescaling(void)
pos = minvid.pos;
freq_pos->driver_data |= mV_vrm_table[pos] << 8;
vid = vrm_mV_table[mV_vrm_table[pos]];
- printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n",
+ pr_info("f: %d kHz, index: %d, vid: %d mV\n",
speed, (int)(freq_pos - longhaul_table), vid.mV);
}
can_scale_voltage = 1;
- printk(KERN_INFO PFX "Voltage scaling enabled.\n");
+ pr_info("Voltage scaling enabled\n");
}
@@ -720,8 +711,7 @@ static int enable_arbiter_disable(void)
pci_write_config_byte(dev, reg, pci_cmd);
pci_read_config_byte(dev, reg, &pci_cmd);
if (!(pci_cmd & 1<<7)) {
- printk(KERN_ERR PFX
- "Can't enable access to port 0x22.\n");
+ pr_err("Can't enable access to port 0x22\n");
status = 0;
}
}
@@ -758,8 +748,7 @@ static int longhaul_setup_southbridge(void)
if (pci_cmd & 1 << 7) {
pci_read_config_dword(dev, 0x88, &acpi_regs_addr);
acpi_regs_addr &= 0xff00;
- printk(KERN_INFO PFX "ACPI I/O at 0x%x\n",
- acpi_regs_addr);
+ pr_info("ACPI I/O at 0x%x\n", acpi_regs_addr);
}
pci_dev_put(dev);
@@ -853,14 +842,14 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
longhaul_version = TYPE_LONGHAUL_V1;
}
- printk(KERN_INFO PFX "VIA %s CPU detected. ", cpuname);
+ pr_info("VIA %s CPU detected. ", cpuname);
switch (longhaul_version) {
case TYPE_LONGHAUL_V1:
case TYPE_LONGHAUL_V2:
- printk(KERN_CONT "Longhaul v%d supported.\n", longhaul_version);
+ pr_cont("Longhaul v%d supported\n", longhaul_version);
break;
case TYPE_POWERSAVER:
- printk(KERN_CONT "Powersaver supported.\n");
+ pr_cont("Powersaver supported\n");
break;
};
@@ -889,15 +878,14 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
if (!(longhaul_flags & USE_ACPI_C3
|| longhaul_flags & USE_NORTHBRIDGE)
&& ((pr == NULL) || !(pr->flags.bm_control))) {
- printk(KERN_ERR PFX
- "No ACPI support. Unsupported northbridge.\n");
+ pr_err("No ACPI support: Unsupported northbridge\n");
return -ENODEV;
}
if (longhaul_flags & USE_NORTHBRIDGE)
- printk(KERN_INFO PFX "Using northbridge support.\n");
+ pr_info("Using northbridge support\n");
if (longhaul_flags & USE_ACPI_C3)
- printk(KERN_INFO PFX "Using ACPI support.\n");
+ pr_info("Using ACPI support\n");
ret = longhaul_get_ranges();
if (ret != 0)
@@ -934,20 +922,18 @@ static int __init longhaul_init(void)
return -ENODEV;
if (!enable) {
- printk(KERN_ERR PFX "Option \"enable\" not set. Aborting.\n");
+ pr_err("Option \"enable\" not set - Aborting\n");
return -ENODEV;
}
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
- printk(KERN_ERR PFX "More than 1 CPU detected, "
- "longhaul disabled.\n");
+ pr_err("More than 1 CPU detected, longhaul disabled\n");
return -ENODEV;
}
#endif
#ifdef CONFIG_X86_IO_APIC
- if (cpu_has_apic) {
- printk(KERN_ERR PFX "APIC detected. Longhaul is currently "
- "broken in this configuration.\n");
+ if (boot_cpu_has(X86_FEATURE_APIC)) {
+ pr_err("APIC detected. Longhaul is currently broken in this configuration.\n");
return -ENODEV;
}
#endif
@@ -955,7 +941,7 @@ static int __init longhaul_init(void)
case 6 ... 9:
return cpufreq_register_driver(&longhaul_driver);
case 10:
- printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n");
+ pr_err("Use acpi-cpufreq driver for VIA C7\n");
default:
;
}
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index cd593c1f66dc..6bbdac1065ff 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -10,6 +10,9 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/err.h>
@@ -76,7 +79,7 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpuclk = clk_get(NULL, "cpu_clk");
if (IS_ERR(cpuclk)) {
- printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
+ pr_err("couldn't get CPU clk\n");
return PTR_ERR(cpuclk);
}
@@ -163,7 +166,7 @@ static int __init cpufreq_init(void)
if (ret)
return ret;
- pr_info("cpufreq: Loongson-2F CPU frequency driver.\n");
+ pr_info("Loongson-2F CPU frequency driver\n");
cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index cc3408fc073f..d9df89392b84 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -13,6 +13,8 @@
#undef DEBUG
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -174,7 +176,7 @@ static int __init maple_cpufreq_init(void)
/* Get first CPU node */
cpunode = of_cpu_device_node_get(0);
if (cpunode == NULL) {
- printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
+ pr_err("Can't find any CPU 0 node\n");
goto bail_noprops;
}
@@ -182,8 +184,7 @@ static int __init maple_cpufreq_init(void)
/* we actually don't care on which CPU to access PVR */
pvr_hi = PVR_VER(mfspr(SPRN_PVR));
if (pvr_hi != 0x3c && pvr_hi != 0x44) {
- printk(KERN_ERR "cpufreq: Unsupported CPU version (%x)\n",
- pvr_hi);
+ pr_err("Unsupported CPU version (%x)\n", pvr_hi);
goto bail_noprops;
}
@@ -222,8 +223,8 @@ static int __init maple_cpufreq_init(void)
maple_pmode_cur = -1;
maple_scom_switch_freq(maple_scom_query_freq());
- printk(KERN_INFO "Registering Maple CPU frequency driver\n");
- printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+ pr_info("Registering Maple CPU frequency driver\n");
+ pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
maple_cpu_freqs[1].frequency/1000,
maple_cpu_freqs[0].frequency/1000,
maple_cpu_freqs[maple_pmode_cur].frequency/1000);
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
index 2058e6d292ce..6f602c7a71bd 100644
--- a/drivers/cpufreq/mt8173-cpufreq.c
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -59,11 +59,8 @@ static LIST_HEAD(dvfs_info_list);
static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
{
struct mtk_cpu_dvfs_info *info;
- struct list_head *list;
-
- list_for_each(list, &dvfs_info_list) {
- info = list_entry(list, struct mtk_cpu_dvfs_info, list_head);
+ list_for_each_entry(info, &dvfs_info_list, list_head) {
if (cpumask_test_cpu(cpu, &info->cpus))
return info;
}
@@ -524,8 +521,7 @@ static struct cpufreq_driver mt8173_cpufreq_driver = {
static int mt8173_cpufreq_probe(struct platform_device *pdev)
{
- struct mtk_cpu_dvfs_info *info;
- struct list_head *list, *tmp;
+ struct mtk_cpu_dvfs_info *info, *tmp;
int cpu, ret;
for_each_possible_cpu(cpu) {
@@ -559,11 +555,9 @@ static int mt8173_cpufreq_probe(struct platform_device *pdev)
return 0;
release_dvfs_info_list:
- list_for_each_safe(list, tmp, &dvfs_info_list) {
- info = list_entry(list, struct mtk_cpu_dvfs_info, list_head);
-
+ list_for_each_entry_safe(info, tmp, &dvfs_info_list, list_head) {
mtk_cpu_dvfs_info_release(info);
- list_del(list);
+ list_del(&info->list_head);
}
return ret;
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
new file mode 100644
index 000000000000..e920889b9ac2
--- /dev/null
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -0,0 +1,107 @@
+/*
+ * CPUFreq support for Armada 370/XP platforms.
+ *
+ * Copyright (C) 2012-2016 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "mvebu-pmsu: " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/resource.h>
+
+static int __init armada_xp_pmsu_cpufreq_init(void)
+{
+ struct device_node *np;
+ struct resource res;
+ int ret, cpu;
+
+ if (!of_machine_is_compatible("marvell,armadaxp"))
+ return 0;
+
+ /*
+ * In order to have proper cpufreq handling, we need to ensure
+ * that the Device Tree description of the CPU clock includes
+ * the definition of the PMU DFS registers. If not, we do not
+ * register the clock notifier and the cpufreq driver. This
+ * piece of code is only for compatibility with old Device
+ * Trees.
+ */
+ np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock");
+ if (!np)
+ return 0;
+
+ ret = of_address_to_resource(np, 1, &res);
+ if (ret) {
+ pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n");
+ of_node_put(np);
+ return 0;
+ }
+
+ of_node_put(np);
+
+ /*
+ * For each CPU, this loop registers the operating points
+ * supported (which are the nominal CPU frequency and half of
+ * it), and registers the clock notifier that will take care
+ * of doing the PMSU part of a frequency transition.
+ */
+ for_each_possible_cpu(cpu) {
+ struct device *cpu_dev;
+ struct clk *clk;
+ int ret;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("Cannot get CPU %d\n", cpu);
+ continue;
+ }
+
+ clk = clk_get(cpu_dev, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Cannot get clock for CPU %d\n", cpu);
+ return PTR_ERR(clk);
+ }
+
+ /*
+ * In case of a failure of dev_pm_opp_add(), we don't
+ * bother with cleaning up the registered OPP (there's
+ * no function to do so), and simply cancel the
+ * registration of the cpufreq device.
+ */
+ ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
+ if (ret) {
+ clk_put(clk);
+ return ret;
+ }
+
+ ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
+ if (ret) {
+ clk_put(clk);
+ return ret;
+ }
+
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev,
+ cpumask_of(cpu_dev->id));
+ if (ret)
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+ __func__, ret);
+ }
+
+ platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ return 0;
+}
+device_initcall(armada_xp_pmsu_cpufreq_init);
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index e3866e0d5bf8..cead9bec4843 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -13,6 +13,9 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -163,13 +166,13 @@ static int omap_cpufreq_probe(struct platform_device *pdev)
{
mpu_dev = get_cpu_device(0);
if (!mpu_dev) {
- pr_warning("%s: unable to get the mpu device\n", __func__);
+ pr_warn("%s: unable to get the MPU device\n", __func__);
return -EINVAL;
}
mpu_reg = regulator_get(mpu_dev, "vcc");
if (IS_ERR(mpu_reg)) {
- pr_warning("%s: unable to get MPU regulator\n", __func__);
+ pr_warn("%s: unable to get MPU regulator\n", __func__);
mpu_reg = NULL;
} else {
/*
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 5dd95dab580d..fd77812313f3 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -20,6 +20,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -35,8 +37,6 @@
#include "speedstep-lib.h"
-#define PFX "p4-clockmod: "
-
/*
* Duty Cycle (3bits), note DC_DISABLE is not specified in
* intel docs i just use it to mean disable
@@ -124,11 +124,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
if (c->x86 == 0x06) {
if (cpu_has(c, X86_FEATURE_EST))
- printk_once(KERN_WARNING PFX "Warning: EST-capable "
- "CPU detected. The acpi-cpufreq module offers "
- "voltage scaling in addition to frequency "
- "scaling. You should use that instead of "
- "p4-clockmod, if possible.\n");
+ pr_warn_once("Warning: EST-capable CPU detected. The acpi-cpufreq module offers voltage scaling in addition to frequency scaling. You should use that instead of p4-clockmod, if possible.\n");
switch (c->x86_model) {
case 0x0E: /* Core */
case 0x0F: /* Core Duo */
@@ -152,11 +148,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
- printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
- "The speedstep-ich or acpi cpufreq modules offer "
- "voltage scaling in addition of frequency scaling. "
- "You should use either one instead of p4-clockmod, "
- "if possible.\n");
+ pr_warn("Warning: Pentium 4-M detected. The speedstep-ich or acpi cpufreq modules offer voltage scaling in addition of frequency scaling. You should use either one instead of p4-clockmod, if possible.\n");
return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
}
@@ -265,8 +257,7 @@ static int __init cpufreq_p4_init(void)
ret = cpufreq_register_driver(&p4clockmod_driver);
if (!ret)
- printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock "
- "Modulation available\n");
+ pr_info("P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");
return ret;
}
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 1f49d97a70ea..b7b576e53e92 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -13,6 +13,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -481,13 +483,13 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
freqs = of_get_property(cpunode, "bus-frequencies", &lenp);
lenp /= sizeof(u32);
if (freqs == NULL || lenp != 2) {
- printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
+ pr_err("bus-frequencies incorrect or missing\n");
return 1;
}
ratio = of_get_property(cpunode, "processor-to-bus-ratio*2",
NULL);
if (ratio == NULL) {
- printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
+ pr_err("processor-to-bus-ratio*2 missing\n");
return 1;
}
@@ -550,7 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
if (!voltage_gpio){
- printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n");
+ pr_err("missing cpu-vcore-select gpio\n");
return 1;
}
@@ -675,9 +677,9 @@ out:
pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
ppc_proc_freq = cur_freq * 1000ul;
- printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
- printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
- low_freq/1000, hi_freq/1000, cur_freq/1000);
+ pr_info("Registering PowerMac CPU frequency driver\n");
+ pr_info("Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
+ low_freq/1000, hi_freq/1000, cur_freq/1000);
return cpufreq_register_driver(&pmac_cpufreq_driver);
}
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 4ff86878727f..267e0894c62d 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -12,6 +12,8 @@
#undef DEBUG
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -138,7 +140,7 @@ static void g5_vdnap_switch_volt(int speed_mode)
usleep_range(1000, 1000);
}
if (done == 0)
- printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
+ pr_warn("Timeout in clock slewing !\n");
}
@@ -266,7 +268,7 @@ static int g5_pfunc_switch_freq(int speed_mode)
rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL);
if (rc)
- printk(KERN_WARNING "cpufreq: pfunc switch error %d\n", rc);
+ pr_warn("pfunc switch error %d\n", rc);
/* It's an irq GPIO so we should be able to just block here,
* I'll do that later after I've properly tested the IRQ code for
@@ -282,7 +284,7 @@ static int g5_pfunc_switch_freq(int speed_mode)
usleep_range(500, 500);
}
if (done == 0)
- printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
+ pr_warn("Timeout in clock slewing !\n");
/* If frequency is going down, last ramp the voltage */
if (speed_mode > g5_pmode_cur)
@@ -368,7 +370,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
}
pvr_hi = (*valp) >> 16;
if (pvr_hi != 0x3c && pvr_hi != 0x44) {
- printk(KERN_ERR "cpufreq: Unsupported CPU version\n");
+ pr_err("Unsupported CPU version\n");
goto bail_noprops;
}
@@ -403,8 +405,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
root = of_find_node_by_path("/");
if (root == NULL) {
- printk(KERN_ERR "cpufreq: Can't find root of "
- "device tree\n");
+ pr_err("Can't find root of device tree\n");
goto bail_noprops;
}
pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0");
@@ -412,8 +413,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
pmf_find_function(root, "slewing-done");
if (pfunc_set_vdnap0 == NULL ||
pfunc_vdnap0_complete == NULL) {
- printk(KERN_ERR "cpufreq: Can't find required "
- "platform function\n");
+ pr_err("Can't find required platform function\n");
goto bail_noprops;
}
@@ -453,10 +453,10 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
g5_pmode_cur = -1;
g5_switch_freq(g5_query_freq());
- printk(KERN_INFO "Registering G5 CPU frequency driver\n");
- printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n",
- freq_method, volt_method);
- printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+ pr_info("Registering G5 CPU frequency driver\n");
+ pr_info("Frequency method: %s, Voltage method: %s\n",
+ freq_method, volt_method);
+ pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
g5_cpu_freqs[1].frequency/1000,
g5_cpu_freqs[0].frequency/1000,
g5_cpu_freqs[g5_pmode_cur].frequency/1000);
@@ -493,7 +493,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
if (cpuid != NULL)
eeprom = of_get_property(cpuid, "cpuid", NULL);
if (eeprom == NULL) {
- printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n");
+ pr_err("Can't find cpuid EEPROM !\n");
rc = -ENODEV;
goto bail;
}
@@ -511,7 +511,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
break;
}
if (hwclock == NULL) {
- printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n");
+ pr_err("Can't find i2c clock chip !\n");
rc = -ENODEV;
goto bail;
}
@@ -539,7 +539,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
/* Check we have minimum requirements */
if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL ||
pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) {
- printk(KERN_ERR "cpufreq: Can't find platform functions !\n");
+ pr_err("Can't find platform functions !\n");
rc = -ENODEV;
goto bail;
}
@@ -567,7 +567,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
/* Get max frequency from device-tree */
valp = of_get_property(cpunode, "clock-frequency", NULL);
if (!valp) {
- printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n");
+ pr_err("Can't find CPU frequency !\n");
rc = -ENODEV;
goto bail;
}
@@ -583,8 +583,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
/* Check for machines with no useful settings */
if (il == ih) {
- printk(KERN_WARNING "cpufreq: No low frequency mode available"
- " on this model !\n");
+ pr_warn("No low frequency mode available on this model !\n");
rc = -ENODEV;
goto bail;
}
@@ -595,7 +594,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
/* Sanity check */
if (min_freq >= max_freq || min_freq < 1000) {
- printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n");
+ pr_err("Can't calculate low frequency !\n");
rc = -ENXIO;
goto bail;
}
@@ -619,10 +618,10 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
g5_pmode_cur = -1;
g5_switch_freq(g5_query_freq());
- printk(KERN_INFO "Registering G5 CPU frequency driver\n");
- printk(KERN_INFO "Frequency method: i2c/pfunc, "
- "Voltage method: %s\n", has_volt ? "i2c/pfunc" : "none");
- printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+ pr_info("Registering G5 CPU frequency driver\n");
+ pr_info("Frequency method: i2c/pfunc, Voltage method: %s\n",
+ has_volt ? "i2c/pfunc" : "none");
+ pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
g5_cpu_freqs[1].frequency/1000,
g5_cpu_freqs[0].frequency/1000,
g5_cpu_freqs[g5_pmode_cur].frequency/1000);
@@ -654,7 +653,7 @@ static int __init g5_cpufreq_init(void)
/* Get first CPU node */
cpunode = of_cpu_device_node_get(0);
if (cpunode == NULL) {
- pr_err("cpufreq: Can't find any CPU node\n");
+ pr_err("Can't find any CPU node\n");
return -ENODEV;
}
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index e6f24b281e3e..dedd2568e852 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -8,6 +8,8 @@
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -22,7 +24,6 @@
#define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long
as it is unused */
-#define PFX "powernow-k6: "
static unsigned int busfreq; /* FSB, in 10 kHz */
static unsigned int max_multiplier;
@@ -141,7 +142,7 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
{
if (clock_ratio[best_i].driver_data > max_multiplier) {
- printk(KERN_ERR PFX "invalid target frequency\n");
+ pr_err("invalid target frequency\n");
return -EINVAL;
}
@@ -175,13 +176,14 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
max_multiplier = param_max_multiplier;
goto have_max_multiplier;
}
- printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
+ pr_err("invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
return -EINVAL;
}
if (!max_multiplier) {
- printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
- printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
+ pr_warn("unknown frequency %u, cannot determine current multiplier\n",
+ khz);
+ pr_warn("use module parameters max_multiplier and bus_frequency\n");
return -EOPNOTSUPP;
}
@@ -193,7 +195,7 @@ have_max_multiplier:
busfreq = param_busfreq / 10;
goto have_busfreq;
}
- printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
+ pr_err("invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
return -EINVAL;
}
@@ -275,7 +277,7 @@ static int __init powernow_k6_init(void)
return -ENODEV;
if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) {
- printk(KERN_INFO PFX "PowerNow IOPORT region already used.\n");
+ pr_info("PowerNow IOPORT region already used\n");
return -EIO;
}
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index c1ae1999770a..9f013ed42977 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -13,6 +13,8 @@
* - We disable half multipliers if ACPI is used on A0 stepping CPUs.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -35,9 +37,6 @@
#include "powernow-k7.h"
-#define PFX "powernow: "
-
-
struct psb_s {
u8 signature[10];
u8 tableversion;
@@ -127,14 +126,13 @@ static int check_powernow(void)
maxei = cpuid_eax(0x80000000);
if (maxei < 0x80000007) { /* Any powernow info ? */
#ifdef MODULE
- printk(KERN_INFO PFX "No powernow capabilities detected\n");
+ pr_info("No powernow capabilities detected\n");
#endif
return 0;
}
if ((c->x86_model == 6) && (c->x86_mask == 0)) {
- printk(KERN_INFO PFX "K7 660[A0] core detected, "
- "enabling errata workarounds\n");
+ pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
have_a0 = 1;
}
@@ -144,22 +142,22 @@ static int check_powernow(void)
if (!(edx & (1 << 1 | 1 << 2)))
return 0;
- printk(KERN_INFO PFX "PowerNOW! Technology present. Can scale: ");
+ pr_info("PowerNOW! Technology present. Can scale: ");
if (edx & 1 << 1) {
- printk("frequency");
+ pr_cont("frequency");
can_scale_bus = 1;
}
if ((edx & (1 << 1 | 1 << 2)) == 0x6)
- printk(" and ");
+ pr_cont(" and ");
if (edx & 1 << 2) {
- printk("voltage");
+ pr_cont("voltage");
can_scale_vid = 1;
}
- printk(".\n");
+ pr_cont("\n");
return 1;
}
@@ -427,16 +425,14 @@ err1:
err05:
kfree(acpi_processor_perf);
err0:
- printk(KERN_WARNING PFX "ACPI perflib can not be used on "
- "this platform\n");
+ pr_warn("ACPI perflib can not be used on this platform\n");
acpi_processor_perf = NULL;
return retval;
}
#else
static int powernow_acpi_init(void)
{
- printk(KERN_INFO PFX "no support for ACPI processor found."
- " Please recompile your kernel with ACPI processor\n");
+ pr_info("no support for ACPI processor found - please recompile your kernel with ACPI processor\n");
return -EINVAL;
}
#endif
@@ -468,8 +464,7 @@ static int powernow_decode_bios(int maxfid, int startvid)
psb = (struct psb_s *) p;
pr_debug("Table version: 0x%x\n", psb->tableversion);
if (psb->tableversion != 0x12) {
- printk(KERN_INFO PFX "Sorry, only v1.2 tables"
- " supported right now\n");
+ pr_info("Sorry, only v1.2 tables supported right now\n");
return -ENODEV;
}
@@ -481,10 +476,8 @@ static int powernow_decode_bios(int maxfid, int startvid)
latency = psb->settlingtime;
if (latency < 100) {
- printk(KERN_INFO PFX "BIOS set settling time "
- "to %d microseconds. "
- "Should be at least 100. "
- "Correcting.\n", latency);
+ pr_info("BIOS set settling time to %d microseconds. Should be at least 100. Correcting.\n",
+ latency);
latency = 100;
}
pr_debug("Settling Time: %d microseconds.\n",
@@ -516,10 +509,9 @@ static int powernow_decode_bios(int maxfid, int startvid)
p += 2;
}
}
- printk(KERN_INFO PFX "No PST tables match this cpuid "
- "(0x%x)\n", etuple);
- printk(KERN_INFO PFX "This is indicative of a broken "
- "BIOS.\n");
+ pr_info("No PST tables match this cpuid (0x%x)\n",
+ etuple);
+ pr_info("This is indicative of a broken BIOS\n");
return -EINVAL;
}
@@ -552,7 +544,7 @@ static int fixup_sgtc(void)
sgtc = 100 * m * latency;
sgtc = sgtc / 3;
if (sgtc > 0xfffff) {
- printk(KERN_WARNING PFX "SGTC too large %d\n", sgtc);
+ pr_warn("SGTC too large %d\n", sgtc);
sgtc = 0xfffff;
}
return sgtc;
@@ -574,14 +566,10 @@ static unsigned int powernow_get(unsigned int cpu)
static int acer_cpufreq_pst(const struct dmi_system_id *d)
{
- printk(KERN_WARNING PFX
- "%s laptop with broken PST tables in BIOS detected.\n",
+ pr_warn("%s laptop with broken PST tables in BIOS detected\n",
d->ident);
- printk(KERN_WARNING PFX
- "You need to downgrade to 3A21 (09/09/2002), or try a newer "
- "BIOS than 3A71 (01/20/2003)\n");
- printk(KERN_WARNING PFX
- "cpufreq scaling has been disabled as a result of this.\n");
+ pr_warn("You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n");
+ pr_warn("cpufreq scaling has been disabled as a result of this\n");
return 0;
}
@@ -616,40 +604,38 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID];
if (!fsb) {
- printk(KERN_WARNING PFX "can not determine bus frequency\n");
+ pr_warn("can not determine bus frequency\n");
return -EINVAL;
}
pr_debug("FSB: %3dMHz\n", fsb/1000);
if (dmi_check_system(powernow_dmi_table) || acpi_force) {
- printk(KERN_INFO PFX "PSB/PST known to be broken. "
- "Trying ACPI instead\n");
+ pr_info("PSB/PST known to be broken - trying ACPI instead\n");
result = powernow_acpi_init();
} else {
result = powernow_decode_bios(fidvidstatus.bits.MFID,
fidvidstatus.bits.SVID);
if (result) {
- printk(KERN_INFO PFX "Trying ACPI perflib\n");
+ pr_info("Trying ACPI perflib\n");
maximum_speed = 0;
minimum_speed = -1;
latency = 0;
result = powernow_acpi_init();
if (result) {
- printk(KERN_INFO PFX
- "ACPI and legacy methods failed\n");
+ pr_info("ACPI and legacy methods failed\n");
}
} else {
/* SGTC use the bus clock as timer */
latency = fixup_sgtc();
- printk(KERN_INFO PFX "SGTC: %d\n", latency);
+ pr_info("SGTC: %d\n", latency);
}
}
if (result)
return result;
- printk(KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n",
- minimum_speed/1000, maximum_speed/1000);
+ pr_info("Minimum speed %d MHz - Maximum speed %d MHz\n",
+ minimum_speed/1000, maximum_speed/1000);
policy->cpuinfo.transition_latency =
cpufreq_scale(2000000UL, fsb, latency);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 39ac78c94be0..54c45368e3f1 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -36,12 +36,56 @@
#include <asm/reg.h>
#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
#include <asm/opal.h>
+#include <linux/timer.h>
#define POWERNV_MAX_PSTATES 256
#define PMSR_PSAFE_ENABLE (1UL << 30)
#define PMSR_SPR_EM_DISABLE (1UL << 31)
#define PMSR_MAX(x) ((x >> 32) & 0xFF)
+#define MAX_RAMP_DOWN_TIME 5120
+/*
+ * On an idle system we want the global pstate to ramp-down from max value to
+ * min over a span of ~5 secs. Also we want it to initially ramp-down slowly and
+ * then ramp-down rapidly later on.
+ *
+ * This gives a percentage rampdown for time elapsed in milliseconds.
+ * ramp_down_percentage = ((ms * ms) >> 18)
+ * ~= 3.8 * (sec * sec)
+ *
+ * At 0 ms ramp_down_percent = 0
+ * At 5120 ms ramp_down_percent = 100
+ */
+#define ramp_down_percent(time) ((time * time) >> 18)
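Checking the formula above: at 2560 ms, (2560 * 2560) >> 18 = 6553600 / 262144 = 25%, and at 5120 ms (MAX_RAMP_DOWN_TIME) it is 26214400 / 262144 = 100%, so the global pstate is fully ramped down after roughly 5 seconds of idleness.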
+
+/* Interval after which the timer is queued to bring down global pstate */
+#define GPSTATE_TIMER_INTERVAL 2000
+
+/**
+ * struct global_pstate_info - Per policy data structure to maintain history of
+ * global pstates
+ * @highest_lpstate: The local pstate from which we are ramping down
+ * @elapsed_time: Time in ms spent in ramping down from
+ * highest_lpstate
+ * @last_sampled_time: Time from boot in ms when global pstates were
+ * last set
+ * @last_lpstate,last_gpstate: Last set values for local and global pstates
+ * @timer: Is used for ramping down if cpu goes idle for
+ * a long time with global pstate held high
+ * @gpstate_lock: A spinlock to maintain synchronization between
+ * routines called by the timer handler and
+ *				governor's target_index calls
+ */
+struct global_pstate_info {
+ int highest_lpstate;
+ unsigned int elapsed_time;
+ unsigned int last_sampled_time;
+ int last_lpstate;
+ int last_gpstate;
+ spinlock_t gpstate_lock;
+ struct timer_list timer;
+};
+
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
static bool rebooting, throttled, occ_reset;
@@ -94,6 +138,17 @@ static struct powernv_pstate_info {
int nr_pstates;
} powernv_pstate_info;
+static inline void reset_gpstates(struct cpufreq_policy *policy)
+{
+ struct global_pstate_info *gpstates = policy->driver_data;
+
+ gpstates->highest_lpstate = 0;
+ gpstates->elapsed_time = 0;
+ gpstates->last_sampled_time = 0;
+ gpstates->last_lpstate = 0;
+ gpstates->last_gpstate = 0;
+}
+
/*
* Initialize the freq table based on data obtained
* from the firmware passed via device-tree
@@ -285,6 +340,7 @@ static inline void set_pmspr(unsigned long sprn, unsigned long val)
struct powernv_smp_call_data {
unsigned int freq;
int pstate_id;
+ int gpstate_id;
};
/*
@@ -343,19 +399,21 @@ static unsigned int powernv_cpufreq_get(unsigned int cpu)
* (struct powernv_smp_call_data *) and the pstate_id which needs to be set
* on this CPU should be present in freq_data->pstate_id.
*/
-static void set_pstate(void *freq_data)
+static void set_pstate(void *data)
{
unsigned long val;
- unsigned long pstate_ul =
- ((struct powernv_smp_call_data *) freq_data)->pstate_id;
+ struct powernv_smp_call_data *freq_data = data;
+ unsigned long pstate_ul = freq_data->pstate_id;
+ unsigned long gpstate_ul = freq_data->gpstate_id;
val = get_pmspr(SPRN_PMCR);
val = val & 0x0000FFFFFFFFFFFFULL;
pstate_ul = pstate_ul & 0xFF;
+ gpstate_ul = gpstate_ul & 0xFF;
/* Set both global(bits 56..63) and local(bits 48..55) PStates */
- val = val | (pstate_ul << 56) | (pstate_ul << 48);
+ val = val | (gpstate_ul << 56) | (pstate_ul << 48);
pr_debug("Setting cpu %d pmcr to %016lX\n",
raw_smp_processor_id(), val);
@@ -424,6 +482,111 @@ next:
}
}
+/**
+ * calc_global_pstate - Calculate global pstate
+ * @elapsed_time: Elapsed time in milliseconds
+ * @local_pstate: New local pstate
+ * @highest_lpstate:	pstate from which it is ramping down
+ *
+ * Finds the appropriate global pstate based on the pstate from which it is
+ * ramping down and the time elapsed while ramping down. It follows a quadratic
+ * equation which ensures that it ramps down to pmin within 5 seconds.
+ */
+static inline int calc_global_pstate(unsigned int elapsed_time,
+ int highest_lpstate, int local_pstate)
+{
+ int pstate_diff;
+
+ /*
+	 * Using ramp_down_percent we get the percentage of the ramp-down
+	 * that we expect to have completed. The difference between
+	 * highest_lpstate and powernv_pstate_info.min gives the absolute
+	 * number of pstates we will eventually have dropped by the end of
+	 * 5 seconds; scaling it gives the number of pstates to drop now.
+ */
+ pstate_diff = ((int)ramp_down_percent(elapsed_time) *
+ (highest_lpstate - powernv_pstate_info.min)) / 100;
+
+ /* Ensure that global pstate is >= to local pstate */
+ if (highest_lpstate - pstate_diff < local_pstate)
+ return local_pstate;
+ else
+ return highest_lpstate - pstate_diff;
+}
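A hypothetical walk-through of calc_global_pstate(): with highest_lpstate sitting 20 pstates above powernv_pstate_info.min and elapsed_time = 2560 ms, ramp_down_percent() is 25, so pstate_diff = 25 * 20 / 100 = 5 and the proposed global pstate is 5 steps below highest_lpstate, unless that would fall below the new local pstate, in which case the local pstate is returned instead.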
+
+static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
+{
+ unsigned int timer_interval;
+
+ /*
+	 * Set up the timer to fire after GPSTATE_TIMER_INTERVAL ms, but if
+	 * that would exceed MAX_RAMP_DOWN_TIME ms of total ramp-down time,
+	 * set the timer so that it fires exactly when MAX_RAMP_DOWN_TIME ms
+	 * of ramp-down time have elapsed.
+ */
+ if ((gpstates->elapsed_time + GPSTATE_TIMER_INTERVAL)
+ > MAX_RAMP_DOWN_TIME)
+ timer_interval = MAX_RAMP_DOWN_TIME - gpstates->elapsed_time;
+ else
+ timer_interval = GPSTATE_TIMER_INTERVAL;
+
+ mod_timer_pinned(&gpstates->timer, jiffies +
+ msecs_to_jiffies(timer_interval));
+}
+
+/**
+ * gpstate_timer_handler
+ *
+ * @data: pointer to cpufreq_policy on which timer was queued
+ *
+ * This handler brings the global pstate down closer to the local pstate
+ * according to the quadratic equation. It queues a new timer if the global
+ * pstate is still not equal to the local pstate.
+ */
+void gpstate_timer_handler(unsigned long data)
+{
+ struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
+ struct global_pstate_info *gpstates = policy->driver_data;
+ int gpstate_id;
+ unsigned int time_diff = jiffies_to_msecs(jiffies)
+ - gpstates->last_sampled_time;
+ struct powernv_smp_call_data freq_data;
+
+ if (!spin_trylock(&gpstates->gpstate_lock))
+ return;
+
+ gpstates->last_sampled_time += time_diff;
+ gpstates->elapsed_time += time_diff;
+ freq_data.pstate_id = gpstates->last_lpstate;
+
+ if ((gpstates->last_gpstate == freq_data.pstate_id) ||
+ (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME)) {
+ gpstate_id = freq_data.pstate_id;
+ reset_gpstates(policy);
+ gpstates->highest_lpstate = freq_data.pstate_id;
+ } else {
+ gpstate_id = calc_global_pstate(gpstates->elapsed_time,
+ gpstates->highest_lpstate,
+ freq_data.pstate_id);
+ }
+
+ /*
+ * If the local pstate is equal to the global pstate, ramp-down is
+ * over, so the timer does not need to be queued.
+ */
+ if (gpstate_id != freq_data.pstate_id)
+ queue_gpstate_timer(gpstates);
+
+ freq_data.gpstate_id = gpstate_id;
+ gpstates->last_gpstate = freq_data.gpstate_id;
+ gpstates->last_lpstate = freq_data.pstate_id;
+
+ spin_unlock(&gpstates->gpstate_lock);
+
+ /* Timer may get migrated to a different cpu on cpu hot unplug */
+ smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
+}
+
/*
* powernv_cpufreq_target_index: Sets the frequency corresponding to
* the cpufreq table entry indexed by new_index on the cpus in the
@@ -433,6 +596,8 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
unsigned int new_index)
{
struct powernv_smp_call_data freq_data;
+ unsigned int cur_msec, gpstate_id;
+ struct global_pstate_info *gpstates = policy->driver_data;
if (unlikely(rebooting) && new_index != get_nominal_index())
return 0;
@@ -440,28 +605,81 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
if (!throttled)
powernv_cpufreq_throttle_check(NULL);
+ cur_msec = jiffies_to_msecs(get_jiffies_64());
+
+ spin_lock(&gpstates->gpstate_lock);
freq_data.pstate_id = powernv_freqs[new_index].driver_data;
+ if (!gpstates->last_sampled_time) {
+ gpstate_id = freq_data.pstate_id;
+ gpstates->highest_lpstate = freq_data.pstate_id;
+ goto gpstates_done;
+ }
+
+ if (gpstates->last_gpstate > freq_data.pstate_id) {
+ gpstates->elapsed_time += cur_msec -
+ gpstates->last_sampled_time;
+
+ /*
+ * If it has been ramping down for more than MAX_RAMP_DOWN_TIME,
+ * reset all global pstate related data and set it equal to the
+ * local pstate to start fresh.
+ */
+ if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
+ reset_gpstates(policy);
+ gpstates->highest_lpstate = freq_data.pstate_id;
+ gpstate_id = freq_data.pstate_id;
+ } else {
+ /* Elapsed time is less than 5 seconds, continue to ramp down */
+ gpstate_id = calc_global_pstate(gpstates->elapsed_time,
+ gpstates->highest_lpstate,
+ freq_data.pstate_id);
+ }
+ } else {
+ reset_gpstates(policy);
+ gpstates->highest_lpstate = freq_data.pstate_id;
+ gpstate_id = freq_data.pstate_id;
+ }
+
+ /*
+ * If the local pstate is equal to the global pstate, ramp-down is
+ * over, so the timer does not need to be queued.
+ */
+ if (gpstate_id != freq_data.pstate_id)
+ queue_gpstate_timer(gpstates);
+ else
+ del_timer_sync(&gpstates->timer);
+
+gpstates_done:
+ freq_data.gpstate_id = gpstate_id;
+ gpstates->last_sampled_time = cur_msec;
+ gpstates->last_gpstate = freq_data.gpstate_id;
+ gpstates->last_lpstate = freq_data.pstate_id;
+
+ spin_unlock(&gpstates->gpstate_lock);
+
/*
* Use smp_call_function to send IPI and execute the
* mtspr on target CPU. We could do that without IPI
* if current CPU is within policy->cpus (core)
*/
smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
-
return 0;
}
static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- int base, i;
+ int base, i, ret;
+ struct kernfs_node *kn;
+ struct global_pstate_info *gpstates;
base = cpu_first_thread_sibling(policy->cpu);
for (i = 0; i < threads_per_core; i++)
cpumask_set_cpu(base + i, policy->cpus);
- if (!policy->driver_data) {
+ kn = kernfs_find_and_get(policy->kobj.sd, throttle_attr_grp.name);
+ if (!kn) {
int ret;
ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp);
@@ -470,13 +688,37 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->cpu);
return ret;
}
- /*
- * policy->driver_data is used as a flag for one-time
- * creation of throttle sysfs files.
- */
- policy->driver_data = policy;
+ } else {
+ kernfs_put(kn);
}
- return cpufreq_table_validate_and_show(policy, powernv_freqs);
+
+ gpstates = kzalloc(sizeof(*gpstates), GFP_KERNEL);
+ if (!gpstates)
+ return -ENOMEM;
+
+ policy->driver_data = gpstates;
+
+ /* initialize timer */
+ init_timer_deferrable(&gpstates->timer);
+ gpstates->timer.data = (unsigned long)policy;
+ gpstates->timer.function = gpstate_timer_handler;
+ gpstates->timer.expires = jiffies +
+ msecs_to_jiffies(GPSTATE_TIMER_INTERVAL);
+ spin_lock_init(&gpstates->gpstate_lock);
+ ret = cpufreq_table_validate_and_show(policy, powernv_freqs);
+
+ if (ret < 0)
+ kfree(policy->driver_data);
+
+ return ret;
+}
+
+static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ /* timer is deleted in cpufreq_cpu_stop() */
+ kfree(policy->driver_data);
+
+ return 0;
}
static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
@@ -604,15 +846,19 @@ static struct notifier_block powernv_cpufreq_opal_nb = {
static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
struct powernv_smp_call_data freq_data;
+ struct global_pstate_info *gpstates = policy->driver_data;
freq_data.pstate_id = powernv_pstate_info.min;
+ freq_data.gpstate_id = powernv_pstate_info.min;
smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
+ del_timer_sync(&gpstates->timer);
}
static struct cpufreq_driver powernv_cpufreq_driver = {
.name = "powernv-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
.init = powernv_cpufreq_cpu_init,
+ .exit = powernv_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = powernv_cpufreq_target_index,
.get = powernv_cpufreq_get,
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
index b4c00a5a6a59..3eace725ccd6 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.h
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.h
@@ -17,7 +17,7 @@ int cbe_cpufreq_get_pmode(int cpu);
int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
-#if defined(CONFIG_CPU_FREQ_CBE_PMI) || defined(CONFIG_CPU_FREQ_CBE_PMI_MODULE)
+#if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)
extern bool cbe_cpufreq_has_pmi;
#else
#define cbe_cpufreq_has_pmi (0)
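
The hunk above replaces the open-coded pair of defined() checks with IS_ENABLED(), which is true when the option is built in (=y) or modular (=m). A simplified, self-contained re-derivation of the trick (it mirrors the kernel's include/linux/kconfig.h approach, but is not that header verbatim):

/* A defined option expands to 1, so the ##-pasted placeholder macro injects
 * an extra argument and __take_second_arg() picks up the 1; for an undefined
 * option nothing expands and the fallback 0 is picked instead.
 */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)		___is_defined(x)
#define ___is_defined(val)	____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg)	__take_second_arg(arg 1, 0)

#define MY_IS_ENABLED(option) \
	(__is_defined(option) || __is_defined(option##_MODULE))

#define CONFIG_CPU_FREQ_CBE_PMI_MODULE 1	/* pretend the driver is =m */

#if MY_IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)
#define cbe_cpufreq_has_pmi 1			/* taken for both =y and =m */
#else
#define cbe_cpufreq_has_pmi 0
#endif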
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index 7969f7690498..7c4cd5c634f2 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -23,7 +23,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timer.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_platform.h>
#include <asm/processor.h>
@@ -142,15 +142,4 @@ static int __init cbe_cpufreq_pmi_init(void)
return 0;
}
-
-static void __exit cbe_cpufreq_pmi_exit(void)
-{
- cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
- pmi_unregister_handler(&cbe_pmi_handler);
-}
-
-module_init(cbe_cpufreq_pmi_init);
-module_exit(cbe_cpufreq_pmi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
+device_initcall(cbe_cpufreq_pmi_init);
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 46fee1539cc8..ce345bf34d5d 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -29,6 +29,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -186,8 +188,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
ret = regulator_set_voltage(vcc_core, vmin, vmax);
if (ret)
- pr_err("cpufreq: Failed to set vcc_core in [%dmV..%dmV]\n",
- vmin, vmax);
+ pr_err("Failed to set vcc_core in [%dmV..%dmV]\n", vmin, vmax);
return ret;
}
@@ -195,10 +196,10 @@ static void __init pxa_cpufreq_init_voltages(void)
{
vcc_core = regulator_get(NULL, "vcc_core");
if (IS_ERR(vcc_core)) {
- pr_info("cpufreq: Didn't find vcc_core regulator\n");
+ pr_info("Didn't find vcc_core regulator\n");
vcc_core = NULL;
} else {
- pr_info("cpufreq: Found vcc_core regulator\n");
+ pr_info("Found vcc_core regulator\n");
}
}
#else
@@ -233,9 +234,8 @@ static void pxa27x_guess_max_freq(void)
{
if (!pxa27x_maxfreq) {
pxa27x_maxfreq = 416000;
- printk(KERN_INFO "PXA CPU 27x max frequency not defined "
- "(pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
- pxa27x_maxfreq);
+ pr_info("PXA CPU 27x max frequency not defined (pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
+ pxa27x_maxfreq);
} else {
pxa27x_maxfreq *= 1000;
}
@@ -408,7 +408,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
*/
if (cpu_is_pxa25x()) {
find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
- pr_info("PXA255 cpufreq using %s frequency table\n",
+ pr_info("using %s frequency table\n",
pxa255_turbo_table ? "turbo" : "run");
cpufreq_table_validate_and_show(policy, pxa255_freq_table);
@@ -417,7 +417,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
cpufreq_table_validate_and_show(policy, pxa27x_freq_table);
}
- printk(KERN_INFO "PXA CPU frequency change support initialized\n");
+ pr_info("frequency change support initialized\n");
return 0;
}
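
Most of the printk-to-pr_* conversions in this series lean on the pr_fmt() convention: the pr_* macros paste pr_fmt(fmt) in front of the format string, so a single #define at the top of the file replaces the hand-written "cpufreq: "/PFX prefixes. A minimal userspace stand-in (the kernel's pr_err() is really a printk(KERN_ERR ...) wrapper) showing the effect:

#include <stdio.h>

/* stand-ins for the kernel's KBUILD_MODNAME and pr_err() */
#define KBUILD_MODNAME "pxa2xx-cpufreq"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* prints: pxa2xx-cpufreq: Failed to set vcc_core in [1300mV..1400mV] */
	pr_err("Failed to set vcc_core in [%dmV..%dmV]\n", 1300, 1400);
	return 0;
}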
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index b23e525a7af3..53d8c3fb16f6 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -301,10 +301,11 @@ err_np:
return -ENODEV;
}
-static int __exit qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct cpu_data *data = policy->driver_data;
+ cpufreq_cooling_unregister(data->cdev);
kfree(data->pclk);
kfree(data->table);
kfree(data);
@@ -333,8 +334,8 @@ static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
cpud->cdev = of_cpufreq_cooling_register(np,
policy->related_cpus);
- if (IS_ERR(cpud->cdev)) {
- pr_err("Failed to register cooling device cpu%d: %ld\n",
+ if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) {
+ pr_err("cpu%d is not running as cooling device: %ld\n",
policy->cpu, PTR_ERR(cpud->cdev));
cpud->cdev = NULL;
@@ -348,7 +349,7 @@ static struct cpufreq_driver qoriq_cpufreq_driver = {
.name = "qoriq_cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
.init = qoriq_cpufreq_cpu_init,
- .exit = __exit_p(qoriq_cpufreq_cpu_exit),
+ .exit = qoriq_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = qoriq_cpufreq_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/s3c2412-cpufreq.c b/drivers/cpufreq/s3c2412-cpufreq.c
index eb262133fef2..b04b6f02bbdc 100644
--- a/drivers/cpufreq/s3c2412-cpufreq.c
+++ b/drivers/cpufreq/s3c2412-cpufreq.c
@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -197,21 +199,20 @@ static int s3c2412_cpufreq_add(struct device *dev,
hclk = clk_get(NULL, "hclk");
if (IS_ERR(hclk)) {
- printk(KERN_ERR "%s: cannot find hclk clock\n", __func__);
+ pr_err("cannot find hclk clock\n");
return -ENOENT;
}
fclk = clk_get(NULL, "fclk");
if (IS_ERR(fclk)) {
- printk(KERN_ERR "%s: cannot find fclk clock\n", __func__);
+ pr_err("cannot find fclk clock\n");
goto err_fclk;
}
fclk_rate = clk_get_rate(fclk);
if (fclk_rate > 200000000) {
- printk(KERN_INFO
- "%s: fclk %ld MHz, assuming 266MHz capable part\n",
- __func__, fclk_rate / 1000000);
+ pr_info("fclk %ld MHz, assuming 266MHz capable part\n",
+ fclk_rate / 1000000);
s3c2412_cpufreq_info.max.fclk = 266000000;
s3c2412_cpufreq_info.max.hclk = 133000000;
s3c2412_cpufreq_info.max.pclk = 66000000;
@@ -219,13 +220,13 @@ static int s3c2412_cpufreq_add(struct device *dev,
armclk = clk_get(NULL, "armclk");
if (IS_ERR(armclk)) {
- printk(KERN_ERR "%s: cannot find arm clock\n", __func__);
+ pr_err("cannot find arm clock\n");
goto err_armclk;
}
xtal = clk_get(NULL, "xtal");
if (IS_ERR(xtal)) {
- printk(KERN_ERR "%s: cannot find xtal clock\n", __func__);
+ pr_err("cannot find xtal clock\n");
goto err_xtal;
}
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
index 0129f5c70a61..d0d75b65ddd6 100644
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -66,7 +68,7 @@ static int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
__func__, fclk, armclk, hclk_max);
if (armclk > fclk) {
- printk(KERN_WARNING "%s: armclk > fclk\n", __func__);
+ pr_warn("%s: armclk > fclk\n", __func__);
armclk = fclk;
}
@@ -273,7 +275,7 @@ static int s3c2440_cpufreq_add(struct device *dev,
armclk = s3c_cpufreq_clk_get(NULL, "armclk");
if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) {
- printk(KERN_ERR "%s: failed to get clocks\n", __func__);
+ pr_err("%s: failed to get clocks\n", __func__);
return -ENOENT;
}
diff --git a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
index 9b7b4289d66c..4d976e8dbb2f 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/export.h>
#include <linux/interrupt.h>
@@ -178,7 +180,7 @@ static int __init s3c_freq_debugfs_init(void)
{
dbgfs_root = debugfs_create_dir("s3c-cpufreq", NULL);
if (IS_ERR(dbgfs_root)) {
- printk(KERN_ERR "%s: error creating debugfs root\n", __func__);
+ pr_err("%s: error creating debugfs root\n", __func__);
return PTR_ERR(dbgfs_root);
}
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 68ef8fd9482f..ae8eaed77b70 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -175,7 +177,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
cpu_new.freq.fclk = cpu_new.pll.frequency;
if (s3c_cpufreq_calcdivs(&cpu_new) < 0) {
- printk(KERN_ERR "no divisors for %d\n", target_freq);
+ pr_err("no divisors for %d\n", target_freq);
goto err_notpossible;
}
@@ -187,7 +189,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
if (cpu_new.freq.hclk != cpu_cur.freq.hclk) {
if (s3c_cpufreq_calcio(&cpu_new) < 0) {
- printk(KERN_ERR "%s: no IO timings\n", __func__);
+ pr_err("%s: no IO timings\n", __func__);
goto err_notpossible;
}
}
@@ -262,7 +264,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
return 0;
err_notpossible:
- printk(KERN_ERR "no compatible settings for %d\n", target_freq);
+ pr_err("no compatible settings for %d\n", target_freq);
return -EINVAL;
}
@@ -331,7 +333,7 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
&index);
if (ret < 0) {
- printk(KERN_ERR "%s: no PLL available\n", __func__);
+ pr_err("%s: no PLL available\n", __func__);
goto err_notpossible;
}
@@ -346,7 +348,7 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
return s3c_cpufreq_settarget(policy, target_freq, pll);
err_notpossible:
- printk(KERN_ERR "no compatible settings for %d\n", target_freq);
+ pr_err("no compatible settings for %d\n", target_freq);
return -EINVAL;
}
@@ -356,7 +358,7 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
clk = clk_get(dev, name);
if (IS_ERR(clk))
- printk(KERN_ERR "cpufreq: failed to get clock '%s'\n", name);
+ pr_err("failed to get clock '%s'\n", name);
return clk;
}
@@ -378,15 +380,16 @@ static int __init s3c_cpufreq_initclks(void)
if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) ||
IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) {
- printk(KERN_ERR "%s: could not get clock(s)\n", __func__);
+ pr_err("%s: could not get clock(s)\n", __func__);
return -ENOENT;
}
- printk(KERN_INFO "%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n", __func__,
- clk_get_rate(clk_fclk) / 1000,
- clk_get_rate(clk_hclk) / 1000,
- clk_get_rate(clk_pclk) / 1000,
- clk_get_rate(clk_arm) / 1000);
+ pr_info("%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n",
+ __func__,
+ clk_get_rate(clk_fclk) / 1000,
+ clk_get_rate(clk_hclk) / 1000,
+ clk_get_rate(clk_pclk) / 1000,
+ clk_get_rate(clk_arm) / 1000);
return 0;
}
@@ -424,7 +427,7 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll);
if (ret) {
- printk(KERN_ERR "%s: failed to reset pll/freq\n", __func__);
+ pr_err("%s: failed to reset pll/freq\n", __func__);
return ret;
}
@@ -449,13 +452,12 @@ static struct cpufreq_driver s3c24xx_driver = {
int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
{
if (!info || !info->name) {
- printk(KERN_ERR "%s: failed to pass valid information\n",
- __func__);
+ pr_err("%s: failed to pass valid information\n", __func__);
return -EINVAL;
}
- printk(KERN_INFO "S3C24XX CPU Frequency driver, %s cpu support\n",
- info->name);
+ pr_info("S3C24XX CPU Frequency driver, %s cpu support\n",
+ info->name);
/* check our driver info has valid data */
@@ -478,7 +480,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
struct s3c_cpufreq_board *ours;
if (!board) {
- printk(KERN_INFO "%s: no board data\n", __func__);
+ pr_info("%s: no board data\n", __func__);
return -EINVAL;
}
@@ -487,7 +489,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
ours = kzalloc(sizeof(*ours), GFP_KERNEL);
if (ours == NULL) {
- printk(KERN_ERR "%s: no memory\n", __func__);
+ pr_err("%s: no memory\n", __func__);
return -ENOMEM;
}
@@ -502,15 +504,15 @@ static int __init s3c_cpufreq_auto_io(void)
int ret;
if (!cpu_cur.info->get_iotiming) {
- printk(KERN_ERR "%s: get_iotiming undefined\n", __func__);
+ pr_err("%s: get_iotiming undefined\n", __func__);
return -ENOENT;
}
- printk(KERN_INFO "%s: working out IO settings\n", __func__);
+ pr_info("%s: working out IO settings\n", __func__);
ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming);
if (ret)
- printk(KERN_ERR "%s: failed to get timings\n", __func__);
+ pr_err("%s: failed to get timings\n", __func__);
return ret;
}
@@ -561,7 +563,7 @@ static void s3c_cpufreq_update_loctkime(void)
val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits;
val |= calc_locktime(rate, cpu_cur.info->locktime_m);
- printk(KERN_INFO "%s: new locktime is 0x%08x\n", __func__, val);
+ pr_info("%s: new locktime is 0x%08x\n", __func__, val);
__raw_writel(val, S3C2410_LOCKTIME);
}
@@ -580,7 +582,7 @@ static int s3c_cpufreq_build_freq(void)
ftab = kzalloc(sizeof(*ftab) * size, GFP_KERNEL);
if (!ftab) {
- printk(KERN_ERR "%s: no memory for tables\n", __func__);
+ pr_err("%s: no memory for tables\n", __func__);
return -ENOMEM;
}
@@ -608,15 +610,14 @@ static int __init s3c_cpufreq_initcall(void)
if (cpu_cur.board->auto_io) {
ret = s3c_cpufreq_auto_io();
if (ret) {
- printk(KERN_ERR "%s: failed to get io timing\n",
+ pr_err("%s: failed to get io timing\n",
__func__);
goto out;
}
}
if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) {
- printk(KERN_ERR "%s: no IO support registered\n",
- __func__);
+ pr_err("%s: no IO support registered\n", __func__);
ret = -EINVAL;
goto out;
}
@@ -666,9 +667,9 @@ int s3c_plltab_register(struct cpufreq_frequency_table *plls,
vals += plls_no;
vals->frequency = CPUFREQ_TABLE_END;
- printk(KERN_INFO "cpufreq: %d PLL entries\n", plls_no);
+ pr_info("%d PLL entries\n", plls_no);
} else
- printk(KERN_ERR "cpufreq: no memory for PLL tables\n");
+ pr_err("no memory for PLL tables\n");
return vals ? 0 : -ENOMEM;
}
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index a145b319d171..06d85917b6d5 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -9,6 +9,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -205,7 +207,7 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
} else if (ch == DMC1) {
reg = (dmc_base[1] + 0x30);
} else {
- printk(KERN_ERR "Cannot find DMC port\n");
+ pr_err("Cannot find DMC port\n");
return;
}
@@ -534,7 +536,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
mem_type = check_mem_type(dmc_base[0]);
if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
- printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
+ pr_err("CPUFreq doesn't support this memory type\n");
ret = -EINVAL;
goto out_dmc1;
}
@@ -635,13 +637,13 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
arm_regulator = regulator_get(NULL, "vddarm");
if (IS_ERR(arm_regulator)) {
- pr_err("failed to get regulator vddarm");
+ pr_err("failed to get regulator vddarm\n");
return PTR_ERR(arm_regulator);
}
int_regulator = regulator_get(NULL, "vddint");
if (IS_ERR(int_regulator)) {
- pr_err("failed to get regulator vddint");
+ pr_err("failed to get regulator vddint\n");
regulator_put(arm_regulator);
return PTR_ERR(int_regulator);
}
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index ac84e4818014..4225501a4b78 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -13,6 +13,8 @@
* 2005-03-30: - initial revision
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -30,8 +32,6 @@
static __u8 __iomem *cpuctl;
-#define PFX "sc520_freq: "
-
static struct cpufreq_frequency_table sc520_freq_table[] = {
{0, 0x01, 100000},
{0, 0x02, 133000},
@@ -44,8 +44,8 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
switch (clockspeed_reg & 0x03) {
default:
- printk(KERN_ERR PFX "error: cpuctl register has unexpected "
- "value %02x\n", clockspeed_reg);
+ pr_err("error: cpuctl register has unexpected value %02x\n",
+ clockspeed_reg);
case 0x01:
return 100000;
case 0x02:
@@ -112,7 +112,7 @@ static int __init sc520_freq_init(void)
cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
if (!cpuctl) {
- printk(KERN_ERR "sc520_freq: error: failed to remap memory\n");
+ pr_err("sc520_freq: error: failed to remap memory\n");
return -ENOMEM;
}
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index de5e89b2eaaa..e8a7bf57b31b 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -18,6 +18,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -38,10 +39,20 @@ static struct scpi_dvfs_info *scpi_get_dvfs_info(struct device *cpu_dev)
return scpi_ops->dvfs_get_info(domain);
}
-static int scpi_opp_table_ops(struct device *cpu_dev, bool remove)
+static int scpi_get_transition_latency(struct device *cpu_dev)
{
- int idx, ret = 0;
+ struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
+
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+ return info->latency;
+}
+
+static int scpi_init_opp_table(const struct cpumask *cpumask)
+{
+ int idx, ret;
struct scpi_opp *opp;
+ struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
if (IS_ERR(info))
@@ -51,11 +62,7 @@ static int scpi_opp_table_ops(struct device *cpu_dev, bool remove)
return -EIO;
for (opp = info->opps, idx = 0; idx < info->count; idx++, opp++) {
- if (remove)
- dev_pm_opp_remove(cpu_dev, opp->freq);
- else
- ret = dev_pm_opp_add(cpu_dev, opp->freq,
- opp->m_volt * 1000);
+ ret = dev_pm_opp_add(cpu_dev, opp->freq, opp->m_volt * 1000);
if (ret) {
dev_warn(cpu_dev, "failed to add opp %uHz %umV\n",
opp->freq, opp->m_volt);
@@ -64,33 +71,19 @@ static int scpi_opp_table_ops(struct device *cpu_dev, bool remove)
return ret;
}
}
- return ret;
-}
-static int scpi_get_transition_latency(struct device *cpu_dev)
-{
- struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
-
- if (IS_ERR(info))
- return PTR_ERR(info);
- return info->latency;
-}
-
-static int scpi_init_opp_table(struct device *cpu_dev)
-{
- return scpi_opp_table_ops(cpu_dev, false);
-}
-
-static void scpi_free_opp_table(struct device *cpu_dev)
-{
- scpi_opp_table_ops(cpu_dev, true);
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpumask);
+ if (ret)
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+ __func__, ret);
+ return ret;
}
static struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
.name = "scpi",
.get_transition_latency = scpi_get_transition_latency,
.init_opp_table = scpi_init_opp_table,
- .free_opp_table = scpi_free_opp_table,
+ .free_opp_table = dev_pm_opp_cpumask_remove_table,
};
static int scpi_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 7d4a31571608..41bc5397f4bb 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -13,6 +13,8 @@
* Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -27,7 +29,6 @@
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
-#define PFX "speedstep-centrino: "
#define MAINTAINER "linux-pm@vger.kernel.org"
#define INTEL_MSR_RANGE (0xffff)
@@ -386,8 +387,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
/* check to see if it stuck */
rdmsr(MSR_IA32_MISC_ENABLE, l, h);
if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
- printk(KERN_INFO PFX
- "couldn't enable Enhanced SpeedStep\n");
+ pr_info("couldn't enable Enhanced SpeedStep\n");
return -ENODEV;
}
}
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 37555c6b86a7..b86953a3ddc4 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -18,6 +18,8 @@
* SPEEDSTEP - DEFINITIONS *
*********************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -68,13 +70,13 @@ static int speedstep_find_register(void)
/* get PMBASE */
pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase);
if (!(pmbase & 0x01)) {
- printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
+ pr_err("could not find speedstep register\n");
return -ENODEV;
}
pmbase &= 0xFFFFFFFE;
if (!pmbase) {
- printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
+ pr_err("could not find speedstep register\n");
return -ENODEV;
}
@@ -136,7 +138,7 @@ static void speedstep_set_state(unsigned int state)
pr_debug("change to %u MHz succeeded\n",
speedstep_get_frequency(speedstep_processor) / 1000);
else
- printk(KERN_ERR "cpufreq: change failed - I/O error\n");
+ pr_err("change failed - I/O error\n");
return;
}
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 15d3214aaa00..1b8062182c81 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -8,6 +8,8 @@
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -153,7 +155,7 @@ static unsigned int pentium_core_get_frequency(void)
fsb = 333333;
break;
default:
- printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value");
+ pr_err("PCORE - MSR_FSB_FREQ undefined value\n");
}
rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
@@ -453,11 +455,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
*/
if (*transition_latency > 10000000 ||
*transition_latency < 50000) {
- printk(KERN_WARNING PFX "frequency transition "
- "measured seems out of range (%u "
- "nSec), falling back to a safe one of"
- "%u nSec.\n",
- *transition_latency, 500000);
+ pr_warn("frequency transition measured seems out of range (%u nSec), falling back to a safe one of %u nSec\n",
+ *transition_latency, 500000);
*transition_latency = 500000;
}
}
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 819229e824fb..770a9ae1999a 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -12,6 +12,8 @@
* SPEEDSTEP - DEFINITIONS *
*********************************************************************/
+#define pr_fmt(fmt) "cpufreq: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -204,9 +206,8 @@ static void speedstep_set_state(unsigned int state)
(speedstep_freqs[new_state].frequency / 1000),
retry, result);
else
- printk(KERN_ERR "cpufreq: change to state %u "
- "failed with new_state %u and result %u\n",
- state, new_state, result);
+ pr_err("change to state %u failed with new_state %u and result %u\n",
+ state, new_state, result);
return;
}
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index 20bcceb58ccc..43530254201a 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -14,7 +14,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
-#include <linux/cpufreq-dt.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -69,10 +68,6 @@ static void tegra124_cpu_switch_to_pllx(struct tegra124_cpufreq_priv *priv)
clk_set_parent(priv->cpu_clk, priv->pllx_clk);
}
-static struct cpufreq_dt_platform_data cpufreq_dt_pd = {
- .independent_clocks = false,
-};
-
static int tegra124_cpufreq_probe(struct platform_device *pdev)
{
struct tegra124_cpufreq_priv *priv;
@@ -129,8 +124,6 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
cpufreq_dt_devinfo.name = "cpufreq-dt";
cpufreq_dt_devinfo.parent = &pdev->dev;
- cpufreq_dt_devinfo.data = &cpufreq_dt_pd;
- cpufreq_dt_devinfo.size_data = sizeof(cpufreq_dt_pd);
priv->cpufreq_dt_pdev =
platform_device_register_full(&cpufreq_dt_devinfo);
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 433e93fd4900..87e5bdc5ec74 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -18,6 +18,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -26,8 +27,9 @@
#include "arm_big_little.h"
-static int ve_spc_init_opp_table(struct device *cpu_dev)
+static int ve_spc_init_opp_table(const struct cpumask *cpumask)
{
+ struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
/*
* platform specific SPC code must initialise the opp table
* so just check if the OPP count is non-zero
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index f996efc56605..2b8e6ce62e81 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -173,7 +173,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
struct cpuidle_state *target_state = &drv->states[index];
bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
- ktime_t time_start, time_end;
+ u64 time_start, time_end;
s64 diff;
/*
@@ -195,13 +195,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
sched_idle_set_state(target_state);
trace_cpu_idle_rcuidle(index, dev->cpu);
- time_start = ktime_get();
+ time_start = local_clock();
stop_critical_timings();
entered_state = target_state->enter(dev, drv, index);
start_critical_timings();
- time_end = ktime_get();
+ time_end = local_clock();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
/* The cpu is no longer idle or about to enter idle. */
@@ -217,7 +217,11 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
if (!cpuidle_state_is_coupled(drv, entered_state))
local_irq_enable();
- diff = ktime_to_us(ktime_sub(time_end, time_start));
+ /*
+ * local_clock() returns the time in nanoseconds; shift right by 10
+ * (divide by 1024) to get an approximately microsecond-based time.
+ */
+ diff = (time_end - time_start) >> 10;
if (diff > INT_MAX)
diff = INT_MAX;
@@ -433,6 +437,8 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev)
list_del(&dev->device_list);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
module_put(drv->owner);
+
+ dev->registered = 0;
}
static void __cpuidle_device_init(struct cpuidle_device *dev)
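
The shift-by-10 conversion in the cpuidle hunk above trades a 64-bit division for a shift at the cost of a small systematic error: dividing by 1024 instead of 1000 understates the microsecond count by roughly 2.4%. A quick standalone check of the arithmetic:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t ns = 1500000;		/* e.g. 1.5 ms of idle residency */
	uint64_t exact_us = ns / 1000;	/* 1500 */
	uint64_t approx_us = ns >> 10;	/* 1464, about 2.4% low */

	printf("exact=%" PRIu64 " us, approx=%" PRIu64 " us\n",
	       exact_us, approx_us);
	return 0;
}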
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 477fffdb4f49..d77ba2f12242 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -279,6 +279,14 @@ config CRYPTO_DEV_PPC4XX
help
This option allows you to have support for AMCC crypto acceleration.
+config HW_RANDOM_PPC4XX
+ bool "PowerPC 4xx generic true random number generator support"
+ depends on CRYPTO_DEV_PPC4XX && HW_RANDOM
+ default y
+ ---help---
+ This option provides the kernel-side support for the TRNG hardware
+ found in the security function of some PowerPC 4xx SoCs.
+
config CRYPTO_DEV_OMAP_SHAM
tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
depends on ARCH_OMAP2PLUS
@@ -302,15 +310,16 @@ config CRYPTO_DEV_OMAP_AES
want to use the OMAP module for AES algorithms.
config CRYPTO_DEV_OMAP_DES
- tristate "Support for OMAP DES3DES hw engine"
+ tristate "Support for OMAP DES/3DES hw engine"
depends on ARCH_OMAP2PLUS
select CRYPTO_DES
select CRYPTO_BLKCIPHER
+ select CRYPTO_ENGINE
help
OMAP processors have DES/3DES module accelerator. Select this if you
want to use the OMAP module for DES and 3DES algorithms. Currently
- the ECB and CBC modes of operation supported by the driver. Also
- accesses made on unaligned boundaries are also supported.
+ the ECB and CBC modes of operation are supported by the driver. Also
+ accesses made on unaligned boundaries are supported.
config CRYPTO_DEV_PICOXCELL
tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
@@ -340,9 +349,19 @@ config CRYPTO_DEV_SAHARA
This option enables support for the SAHARA HW crypto accelerator
found in some Freescale i.MX chips.
+config CRYPTO_DEV_MXC_SCC
+ tristate "Support for Freescale Security Controller (SCC)"
+ depends on ARCH_MXC && OF
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_DES
+ help
+ This option enables support for the Security Controller (SCC)
+ found in Freescale i.MX25 chips.
+
config CRYPTO_DEV_S5P
tristate "Support for Samsung S5PV210/Exynos crypto accelerator"
- depends on ARCH_S5PV210 || ARCH_EXYNOS
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+ depends on HAS_IOMEM && HAS_DMA
select CRYPTO_AES
select CRYPTO_BLKCIPHER
help
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 713de9d11148..3c6432dd09d9 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
+obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile
index 5c0c62b65d69..b95539928fdf 100644
--- a/drivers/crypto/amcc/Makefile
+++ b/drivers/crypto/amcc/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
+crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 62134c8a2260..dae1e39139e9 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -40,6 +40,7 @@
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
+#include "crypto4xx_trng.h"
#define PPC4XX_SEC_VERSION_STR "0.5"
@@ -1225,6 +1226,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
if (rc)
goto err_start_dev;
+ ppc4xx_trng_probe(core_dev);
return 0;
err_start_dev:
@@ -1252,6 +1254,8 @@ static int crypto4xx_remove(struct platform_device *ofdev)
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+ ppc4xx_trng_remove(core_dev);
+
free_irq(core_dev->irq, dev);
irq_dispose_mapping(core_dev->irq);
@@ -1272,7 +1276,7 @@ MODULE_DEVICE_TABLE(of, crypto4xx_match);
static struct platform_driver crypto4xx_driver = {
.driver = {
- .name = "crypto4xx",
+ .name = MODULE_NAME,
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
@@ -1284,4 +1288,3 @@ module_platform_driver(crypto4xx_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");
-
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index bac0bdeb4b5f..ecfdcfe3698d 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -24,6 +24,8 @@
#include <crypto/internal/hash.h>
+#define MODULE_NAME "crypto4xx"
+
#define PPC460SX_SDR0_SRST 0x201
#define PPC405EX_SDR0_SRST 0x200
#define PPC460EX_SDR0_SRST 0x201
@@ -72,6 +74,7 @@ struct crypto4xx_device {
char *name;
u64 ce_phy_address;
void __iomem *ce_base;
+ void __iomem *trng_base;
void *pdr; /* base address of packet
descriptor ring */
@@ -106,6 +109,7 @@ struct crypto4xx_core_device {
struct device *device;
struct platform_device *ofdev;
struct crypto4xx_device *dev;
+ struct hwrng *trng;
u32 int_status;
u32 irq;
struct tasklet_struct tasklet;
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 5f5fbc0716ff..46fe57c8f6eb 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -125,6 +125,7 @@
#define PPC4XX_INTERRUPT_CLR 0x3ffff
#define PPC4XX_PRNG_CTRL_AUTO_EN 0x3
#define PPC4XX_DC_3DES_EN 1
+#define PPC4XX_TRNG_EN 0x00020000
#define PPC4XX_INT_DESCR_CNT 4
#define PPC4XX_INT_TIMEOUT_CNT 0
#define PPC4XX_INT_CFG 1
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
new file mode 100644
index 000000000000..677ca17fd223
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -0,0 +1,131 @@
+/*
+ * Generic PowerPC 44x RNG driver
+ *
+ * Copyright 2011 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#include "crypto4xx_core.h"
+#include "crypto4xx_trng.h"
+#include "crypto4xx_reg_def.h"
+
+#define PPC4XX_TRNG_CTRL 0x0008
+#define PPC4XX_TRNG_CTRL_DALM 0x20
+#define PPC4XX_TRNG_STAT 0x0004
+#define PPC4XX_TRNG_STAT_B 0x1
+#define PPC4XX_TRNG_DATA 0x0000
+
+static int ppc4xx_trng_data_present(struct hwrng *rng, int wait)
+{
+ struct crypto4xx_device *dev = (void *)rng->priv;
+ int busy, i, present = 0;
+
+ for (i = 0; i < 20; i++) {
+ busy = (in_le32(dev->trng_base + PPC4XX_TRNG_STAT) &
+ PPC4XX_TRNG_STAT_B);
+ if (!busy || !wait) {
+ present = 1;
+ break;
+ }
+ udelay(10);
+ }
+ return present;
+}
+
+static int ppc4xx_trng_data_read(struct hwrng *rng, u32 *data)
+{
+ struct crypto4xx_device *dev = (void *)rng->priv;
+ *data = in_le32(dev->trng_base + PPC4XX_TRNG_DATA);
+ return 4;
+}
+
+static void ppc4xx_trng_enable(struct crypto4xx_device *dev, bool enable)
+{
+ u32 device_ctrl;
+
+ device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+ if (enable)
+ device_ctrl |= PPC4XX_TRNG_EN;
+ else
+ device_ctrl &= ~PPC4XX_TRNG_EN;
+ writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+}
+
+static const struct of_device_id ppc4xx_trng_match[] = {
+ { .compatible = "ppc4xx-rng", },
+ { .compatible = "amcc,ppc460ex-rng", },
+ { .compatible = "amcc,ppc440epx-rng", },
+ {},
+};
+
+void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
+{
+ struct crypto4xx_device *dev = core_dev->dev;
+ struct device_node *trng = NULL;
+ struct hwrng *rng = NULL;
+ int err;
+
+ /* Find the TRNG device node and map it */
+ trng = of_find_matching_node(NULL, ppc4xx_trng_match);
+ if (!trng || !of_device_is_available(trng))
+ return;
+
+ dev->trng_base = of_iomap(trng, 0);
+ of_node_put(trng);
+ if (!dev->trng_base)
+ goto err_out;
+
+ rng = kzalloc(sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ goto err_out;
+
+ rng->name = MODULE_NAME;
+ rng->data_present = ppc4xx_trng_data_present;
+ rng->data_read = ppc4xx_trng_data_read;
+ rng->priv = (unsigned long) dev;
+ core_dev->trng = rng;
+ ppc4xx_trng_enable(dev, true);
+ out_le32(dev->trng_base + PPC4XX_TRNG_CTRL, PPC4XX_TRNG_CTRL_DALM);
+ err = devm_hwrng_register(core_dev->device, core_dev->trng);
+ if (err) {
+ ppc4xx_trng_enable(dev, false);
+ dev_err(core_dev->device, "failed to register hwrng (%d).\n",
+ err);
+ goto err_out;
+ }
+ return;
+
+err_out:
+ of_node_put(trng);
+ iounmap(dev->trng_base);
+ kfree(rng);
+ dev->trng_base = NULL;
+ core_dev->trng = NULL;
+}
+
+void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev)
+{
+ if (core_dev && core_dev->trng) {
+ struct crypto4xx_device *dev = core_dev->dev;
+
+ devm_hwrng_unregister(core_dev->device, core_dev->trng);
+ ppc4xx_trng_enable(dev, false);
+ iounmap(dev->trng_base);
+ kfree(core_dev->trng);
+ }
+}
+
+MODULE_ALIAS("ppc4xx_rng");
diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h
new file mode 100644
index 000000000000..931d22531f51
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_trng.h
@@ -0,0 +1,34 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file declares the PPC4xx TRNG probe/remove helpers used by
+ * the crypto4xx driver.
+ */
+
+#ifndef __CRYPTO4XX_TRNG_H__
+#define __CRYPTO4XX_TRNG_H__
+
+#ifdef CONFIG_HW_RANDOM_PPC4XX
+void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev);
+void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev);
+#else
+static inline void ppc4xx_trng_probe(
+ struct crypto4xx_core_device *core_dev __maybe_unused) { }
+static inline void ppc4xx_trng_remove(
+ struct crypto4xx_core_device *core_dev __maybe_unused) { }
+#endif
+
+#endif
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 6fd63a600614..5ef4be22eb80 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -248,7 +248,7 @@ static void caam_jr_dequeue(unsigned long devarg)
struct device *caam_jr_alloc(void)
{
struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
- struct device *dev = NULL;
+ struct device *dev = ERR_PTR(-ENODEV);
int min_tfm_cnt = INT_MAX;
int tfm_cnt;
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 6e37845abf8f..2238f77aa248 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -3,6 +3,8 @@ config CRYPTO_DEV_CCP_DD
depends on CRYPTO_DEV_CCP
default m
select HW_RANDOM
+ select DMA_ENGINE
+ select DMADEVICES
select CRYPTO_SHA1
select CRYPTO_SHA256
help
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index b750592cc936..ee4d2741b3ab 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,5 +1,9 @@
obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
-ccp-objs := ccp-dev.o ccp-ops.o ccp-dev-v3.o ccp-platform.o
+ccp-objs := ccp-dev.o \
+ ccp-ops.o \
+ ccp-dev-v3.o \
+ ccp-platform.o \
+ ccp-dmaengine.o
ccp-$(CONFIG_PCI) += ccp-pci.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 7d5eab49179e..d7a710347967 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -406,6 +406,11 @@ static int ccp_init(struct ccp_device *ccp)
goto e_kthread;
}
+ /* Register the DMA engine support */
+ ret = ccp_dmaengine_register(ccp);
+ if (ret)
+ goto e_hwrng;
+
ccp_add_device(ccp);
/* Enable interrupts */
@@ -413,6 +418,9 @@ static int ccp_init(struct ccp_device *ccp)
return 0;
+e_hwrng:
+ hwrng_unregister(&ccp->hwrng);
+
e_kthread:
for (i = 0; i < ccp->cmd_q_count; i++)
if (ccp->cmd_q[i].kthread)
@@ -436,6 +444,9 @@ static void ccp_destroy(struct ccp_device *ccp)
/* Remove this device from the list of available units first */
ccp_del_device(ccp);
+ /* Unregister the DMA engine */
+ ccp_dmaengine_unregister(ccp);
+
/* Unregister the RNG */
hwrng_unregister(&ccp->hwrng);
@@ -515,7 +526,7 @@ static irqreturn_t ccp_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static struct ccp_actions ccp3_actions = {
+static const struct ccp_actions ccp3_actions = {
.perform_aes = ccp_perform_aes,
.perform_xts_aes = ccp_perform_xts_aes,
.perform_sha = ccp_perform_sha,
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 4dbc18727235..87b9f2bfa623 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -16,7 +16,7 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
-#include <linux/rwlock_types.h>
+#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 7745d0be491d..bd41ffceff82 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -22,6 +22,9 @@
#include <linux/dmapool.h>
#include <linux/hw_random.h>
#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/dmaengine.h>
#define MAX_CCP_NAME_LEN 16
#define MAX_DMAPOOL_NAME_LEN 32
@@ -159,7 +162,7 @@ struct ccp_actions {
/* Structure to hold CCP version-specific values */
struct ccp_vdata {
unsigned int version;
- struct ccp_actions *perform;
+ const struct ccp_actions *perform;
};
extern struct ccp_vdata ccpv3;
@@ -167,6 +170,39 @@ extern struct ccp_vdata ccpv3;
struct ccp_device;
struct ccp_cmd;
+struct ccp_dma_cmd {
+ struct list_head entry;
+
+ struct ccp_cmd ccp_cmd;
+};
+
+struct ccp_dma_desc {
+ struct list_head entry;
+
+ struct ccp_device *ccp;
+
+ struct list_head pending;
+ struct list_head active;
+
+ enum dma_status status;
+ struct dma_async_tx_descriptor tx_desc;
+ size_t len;
+};
+
+struct ccp_dma_chan {
+ struct ccp_device *ccp;
+
+ spinlock_t lock;
+ struct list_head pending;
+ struct list_head active;
+ struct list_head complete;
+
+ struct tasklet_struct cleanup_tasklet;
+
+ enum dma_status status;
+ struct dma_chan dma_chan;
+};
+
struct ccp_cmd_queue {
struct ccp_device *ccp;
@@ -261,6 +297,14 @@ struct ccp_device {
unsigned int hwrng_retries;
/*
+ * Support for the CCP DMA capabilities
+ */
+ struct dma_device dma_dev;
+ struct ccp_dma_chan *ccp_dma_chan;
+ struct kmem_cache *dma_cmd_cache;
+ struct kmem_cache *dma_desc_cache;
+
+ /*
* A counter used to generate job-ids for cmds submitted to the CCP
*/
atomic_t current_id ____cacheline_aligned;
@@ -418,4 +462,7 @@ int ccp_cmd_queue_thread(void *data);
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
+int ccp_dmaengine_register(struct ccp_device *ccp);
+void ccp_dmaengine_unregister(struct ccp_device *ccp);
+
#endif
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
new file mode 100644
index 000000000000..94f77b0f9ae7
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -0,0 +1,727 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/dmaengine.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+#include "../../dma/dmaengine.h"
+
+#define CCP_DMA_WIDTH(_mask) \
+({ \
+ u64 mask = _mask + 1; \
+ (mask == 0) ? 64 : fls64(mask); \
+})
+
+static void ccp_free_cmd_resources(struct ccp_device *ccp,
+ struct list_head *list)
+{
+ struct ccp_dma_cmd *cmd, *ctmp;
+
+ list_for_each_entry_safe(cmd, ctmp, list, entry) {
+ list_del(&cmd->entry);
+ kmem_cache_free(ccp->dma_cmd_cache, cmd);
+ }
+}
+
+static void ccp_free_desc_resources(struct ccp_device *ccp,
+ struct list_head *list)
+{
+ struct ccp_dma_desc *desc, *dtmp;
+
+ list_for_each_entry_safe(desc, dtmp, list, entry) {
+ ccp_free_cmd_resources(ccp, &desc->active);
+ ccp_free_cmd_resources(ccp, &desc->pending);
+
+ list_del(&desc->entry);
+ kmem_cache_free(ccp->dma_desc_cache, desc);
+ }
+}
+
+static void ccp_free_chan_resources(struct dma_chan *dma_chan)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ unsigned long flags;
+
+ dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ ccp_free_desc_resources(chan->ccp, &chan->complete);
+ ccp_free_desc_resources(chan->ccp, &chan->active);
+ ccp_free_desc_resources(chan->ccp, &chan->pending);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
+ struct list_head *list)
+{
+ struct ccp_dma_desc *desc, *dtmp;
+
+ list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
+ if (!async_tx_test_ack(&desc->tx_desc))
+ continue;
+
+ dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);
+
+ ccp_free_cmd_resources(ccp, &desc->active);
+ ccp_free_cmd_resources(ccp, &desc->pending);
+
+ list_del(&desc->entry);
+ kmem_cache_free(ccp->dma_desc_cache, desc);
+ }
+}
+
+static void ccp_do_cleanup(unsigned long data)
+{
+ struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
+ unsigned long flags;
+
+ dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
+ dma_chan_name(&chan->dma_chan));
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ ccp_cleanup_desc_resources(chan->ccp, &chan->complete);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
+{
+ struct ccp_dma_cmd *cmd;
+ int ret;
+
+ cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
+ list_move(&cmd->entry, &desc->active);
+
+ dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
+ desc->tx_desc.cookie, cmd);
+
+ ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
+ if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
+ return 0;
+
+ dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
+ ret, desc->tx_desc.cookie, cmd);
+
+ return ret;
+}
+
+static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
+{
+ struct ccp_dma_cmd *cmd;
+
+ cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
+ entry);
+ if (!cmd)
+ return;
+
+ dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
+ __func__, desc->tx_desc.cookie, cmd);
+
+ list_del(&cmd->entry);
+ kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
+}
+
+static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
+ struct ccp_dma_desc *desc)
+{
+ /* Move current DMA descriptor to the complete list */
+ if (desc)
+ list_move(&desc->entry, &chan->complete);
+
+ /* Get the next DMA descriptor on the active list */
+ desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
+ entry);
+
+ return desc;
+}
+
+static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
+ struct ccp_dma_desc *desc)
+{
+ struct dma_async_tx_descriptor *tx_desc;
+ unsigned long flags;
+
+ /* Loop over descriptors until one is found with commands */
+ do {
+ if (desc) {
+ /* Remove the DMA command from the list and free it */
+ ccp_free_active_cmd(desc);
+
+ if (!list_empty(&desc->pending)) {
+ /* No errors, keep going */
+ if (desc->status != DMA_ERROR)
+ return desc;
+
+ /* Error, free remaining commands and move on */
+ ccp_free_cmd_resources(desc->ccp,
+ &desc->pending);
+ }
+
+ tx_desc = &desc->tx_desc;
+ } else {
+ tx_desc = NULL;
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (desc) {
+ if (desc->status != DMA_ERROR)
+ desc->status = DMA_COMPLETE;
+
+ dev_dbg(desc->ccp->dev,
+ "%s - tx %d complete, status=%u\n", __func__,
+ desc->tx_desc.cookie, desc->status);
+
+ dma_cookie_complete(tx_desc);
+ }
+
+ desc = __ccp_next_dma_desc(chan, desc);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ if (tx_desc) {
+ if (tx_desc->callback &&
+ (tx_desc->flags & DMA_PREP_INTERRUPT))
+ tx_desc->callback(tx_desc->callback_param);
+
+ dma_run_dependencies(tx_desc);
+ }
+ } while (desc);
+
+ return NULL;
+}
+
+static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
+{
+ struct ccp_dma_desc *desc;
+
+ if (list_empty(&chan->pending))
+ return NULL;
+
+ desc = list_empty(&chan->active)
+ ? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
+ : NULL;
+
+ list_splice_tail_init(&chan->pending, &chan->active);
+
+ return desc;
+}
+
+static void ccp_cmd_callback(void *data, int err)
+{
+ struct ccp_dma_desc *desc = data;
+ struct ccp_dma_chan *chan;
+ int ret;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
+ dma_chan);
+
+ dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
+ __func__, desc->tx_desc.cookie, err);
+
+ if (err)
+ desc->status = DMA_ERROR;
+
+ while (true) {
+ /* Check for DMA descriptor completion */
+ desc = ccp_handle_active_desc(chan, desc);
+
+ /* Don't submit cmd if no descriptor or DMA is paused */
+ if (!desc || (chan->status == DMA_PAUSED))
+ break;
+
+ ret = ccp_issue_next_cmd(desc);
+ if (!ret)
+ break;
+
+ desc->status = DMA_ERROR;
+ }
+
+ tasklet_schedule(&chan->cleanup_tasklet);
+}
+
+static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
+{
+ struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
+ tx_desc);
+ struct ccp_dma_chan *chan;
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ cookie = dma_cookie_assign(tx_desc);
+ list_add_tail(&desc->entry, &chan->pending);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
+ __func__, cookie);
+
+ return cookie;
+}
+
+static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
+{
+ struct ccp_dma_cmd *cmd;
+
+ cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
+ if (cmd)
+ memset(cmd, 0, sizeof(*cmd));
+
+ return cmd;
+}
+
+static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
+ unsigned long flags)
+{
+ struct ccp_dma_desc *desc;
+
+ desc = kmem_cache_alloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ memset(desc, 0, sizeof(*desc));
+
+ dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
+ desc->tx_desc.flags = flags;
+ desc->tx_desc.tx_submit = ccp_tx_submit;
+ desc->ccp = chan->ccp;
+ INIT_LIST_HEAD(&desc->pending);
+ INIT_LIST_HEAD(&desc->active);
+ desc->status = DMA_IN_PROGRESS;
+
+ return desc;
+}
+
+static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
+ struct scatterlist *dst_sg,
+ unsigned int dst_nents,
+ struct scatterlist *src_sg,
+ unsigned int src_nents,
+ unsigned long flags)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ struct ccp_device *ccp = chan->ccp;
+ struct ccp_dma_desc *desc;
+ struct ccp_dma_cmd *cmd;
+ struct ccp_cmd *ccp_cmd;
+ struct ccp_passthru_nomap_engine *ccp_pt;
+ unsigned int src_offset, src_len;
+ unsigned int dst_offset, dst_len;
+ unsigned int len;
+ unsigned long sflags;
+ size_t total_len;
+
+ if (!dst_sg || !src_sg)
+ return NULL;
+
+ if (!dst_nents || !src_nents)
+ return NULL;
+
+ desc = ccp_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ total_len = 0;
+
+ src_len = sg_dma_len(src_sg);
+ src_offset = 0;
+
+ dst_len = sg_dma_len(dst_sg);
+ dst_offset = 0;
+
+ while (true) {
+ if (!src_len) {
+ src_nents--;
+ if (!src_nents)
+ break;
+
+ src_sg = sg_next(src_sg);
+ if (!src_sg)
+ break;
+
+ src_len = sg_dma_len(src_sg);
+ src_offset = 0;
+ continue;
+ }
+
+ if (!dst_len) {
+ dst_nents--;
+ if (!dst_nents)
+ break;
+
+ dst_sg = sg_next(dst_sg);
+ if (!dst_sg)
+ break;
+
+ dst_len = sg_dma_len(dst_sg);
+ dst_offset = 0;
+ continue;
+ }
+
+ len = min(dst_len, src_len);
+
+ cmd = ccp_alloc_dma_cmd(chan);
+ if (!cmd)
+ goto err;
+
+ ccp_cmd = &cmd->ccp_cmd;
+ ccp_pt = &ccp_cmd->u.passthru_nomap;
+ ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
+ ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
+ ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
+ ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
+ ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
+ ccp_pt->src_len = len;
+ ccp_pt->final = 1;
+ ccp_cmd->callback = ccp_cmd_callback;
+ ccp_cmd->data = desc;
+
+ list_add_tail(&cmd->entry, &desc->pending);
+
+ dev_dbg(ccp->dev,
+ "%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
+ cmd, &ccp_pt->src_dma,
+ &ccp_pt->dst_dma, ccp_pt->src_len);
+
+ total_len += len;
+
+ src_len -= len;
+ src_offset += len;
+
+ dst_len -= len;
+ dst_offset += len;
+ }
+
+ desc->len = total_len;
+
+ if (list_empty(&desc->pending))
+ goto err;
+
+ dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);
+
+ spin_lock_irqsave(&chan->lock, sflags);
+
+ list_add_tail(&desc->entry, &chan->pending);
+
+ spin_unlock_irqrestore(&chan->lock, sflags);
+
+ return desc;
+
+err:
+ ccp_free_cmd_resources(ccp, &desc->pending);
+ kmem_cache_free(ccp->dma_desc_cache, desc);
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
+ struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
+ unsigned long flags)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ struct ccp_dma_desc *desc;
+ struct scatterlist dst_sg, src_sg;
+
+ dev_dbg(chan->ccp->dev,
+ "%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
+ __func__, &src, &dst, len, flags);
+
+ sg_init_table(&dst_sg, 1);
+ sg_dma_address(&dst_sg) = dst;
+ sg_dma_len(&dst_sg) = len;
+
+ sg_init_table(&src_sg, 1);
+ sg_dma_address(&src_sg) = src;
+ sg_dma_len(&src_sg) = len;
+
+ desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->tx_desc;
+}
+
+static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
+ struct dma_chan *dma_chan, struct scatterlist *dst_sg,
+ unsigned int dst_nents, struct scatterlist *src_sg,
+ unsigned int src_nents, unsigned long flags)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ struct ccp_dma_desc *desc;
+
+ dev_dbg(chan->ccp->dev,
+ "%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
+ __func__, src_sg, src_nents, dst_sg, dst_nents, flags);
+
+ desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
+ flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->tx_desc;
+}
+
+static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
+ struct dma_chan *dma_chan, unsigned long flags)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ struct ccp_dma_desc *desc;
+
+ desc = ccp_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->tx_desc;
+}
+
+static void ccp_issue_pending(struct dma_chan *dma_chan)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ struct ccp_dma_desc *desc;
+ unsigned long flags;
+
+ dev_dbg(chan->ccp->dev, "%s\n", __func__);
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ desc = __ccp_pending_to_active(chan);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* If there was nothing active, start processing */
+ if (desc)
+ ccp_cmd_callback(desc, 0);
+}
+
+static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ struct ccp_dma_desc *desc;
+ enum dma_status ret;
+ unsigned long flags;
+
+ if (chan->status == DMA_PAUSED) {
+ ret = DMA_PAUSED;
+ goto out;
+ }
+
+ ret = dma_cookie_status(dma_chan, cookie, state);
+ if (ret == DMA_COMPLETE) {
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* Get status from complete chain, if still there */
+ list_for_each_entry(desc, &chan->complete, entry) {
+ if (desc->tx_desc.cookie != cookie)
+ continue;
+
+ ret = desc->status;
+ break;
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+
+out:
+ dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);
+
+ return ret;
+}
+
+static int ccp_pause(struct dma_chan *dma_chan)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+
+ chan->status = DMA_PAUSED;
+
+ /* TODO: Wait for active DMA to complete before returning? */
+
+ return 0;
+}
+
+static int ccp_resume(struct dma_chan *dma_chan)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ struct ccp_dma_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
+ entry);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Indicate the channel is running again */
+ chan->status = DMA_IN_PROGRESS;
+
+ /* If there was something active, re-start */
+ if (desc)
+ ccp_cmd_callback(desc, 0);
+
+ return 0;
+}
+
+static int ccp_terminate_all(struct dma_chan *dma_chan)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ unsigned long flags;
+
+ dev_dbg(chan->ccp->dev, "%s\n", __func__);
+
+ /* TODO: Wait for active DMA to complete before continuing */
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* TODO: Purge the complete list? */
+ ccp_free_desc_resources(chan->ccp, &chan->active);
+ ccp_free_desc_resources(chan->ccp, &chan->pending);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return 0;
+}
+
+int ccp_dmaengine_register(struct ccp_device *ccp)
+{
+ struct ccp_dma_chan *chan;
+ struct dma_device *dma_dev = &ccp->dma_dev;
+ struct dma_chan *dma_chan;
+ char *dma_cmd_cache_name;
+ char *dma_desc_cache_name;
+ unsigned int i;
+ int ret;
+
+ ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
+ sizeof(*(ccp->ccp_dma_chan)),
+ GFP_KERNEL);
+ if (!ccp->ccp_dma_chan)
+ return -ENOMEM;
+
+ dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
+ "%s-dmaengine-cmd-cache",
+ ccp->name);
+ if (!dma_cmd_cache_name)
+ return -ENOMEM;
+
+ ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
+ sizeof(struct ccp_dma_cmd),
+ sizeof(void *),
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!ccp->dma_cmd_cache)
+ return -ENOMEM;
+
+ dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
+ "%s-dmaengine-desc-cache",
+ ccp->name);
+ if (!dma_desc_cache_name) {
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+ ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
+ sizeof(struct ccp_dma_desc),
+ sizeof(void *),
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!ccp->dma_desc_cache) {
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+
+ dma_dev->dev = ccp->dev;
+ dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
+ dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
+ dma_dev->directions = DMA_MEM_TO_MEM;
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_SG, dma_dev->cap_mask);
+ dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ chan = ccp->ccp_dma_chan + i;
+ dma_chan = &chan->dma_chan;
+
+ chan->ccp = ccp;
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->pending);
+ INIT_LIST_HEAD(&chan->active);
+ INIT_LIST_HEAD(&chan->complete);
+
+ tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
+ (unsigned long)chan);
+
+ dma_chan->device = dma_dev;
+ dma_cookie_init(dma_chan);
+
+ list_add_tail(&dma_chan->device_node, &dma_dev->channels);
+ }
+
+ dma_dev->device_free_chan_resources = ccp_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
+ dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
+ dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
+ dma_dev->device_issue_pending = ccp_issue_pending;
+ dma_dev->device_tx_status = ccp_tx_status;
+ dma_dev->device_pause = ccp_pause;
+ dma_dev->device_resume = ccp_resume;
+ dma_dev->device_terminate_all = ccp_terminate_all;
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto err_reg;
+
+ return 0;
+
+err_reg:
+ kmem_cache_destroy(ccp->dma_desc_cache);
+
+err_cache:
+ kmem_cache_destroy(ccp->dma_cmd_cache);
+
+ return ret;
+}
+
+void ccp_dmaengine_unregister(struct ccp_device *ccp)
+{
+ struct dma_device *dma_dev = &ccp->dma_dev;
+
+ dma_async_device_unregister(dma_dev);
+
+ kmem_cache_destroy(ccp->dma_desc_cache);
+ kmem_cache_destroy(ccp->dma_cmd_cache);
+}
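For reference, a minimal sketch of how a generic dmaengine client could exercise the DMA_MEMCPY capability registered above. It reaches the device_prep_dma_memcpy hook this file installs via the standard dmaengine helpers; the function name and the busy-wait completion are illustrative only, and dst/src are assumed to be DMA addresses the caller has already mapped.

#include <linux/dmaengine.h>

static int ccp_dma_memcpy_example(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	dma_cap_mask_t mask;

	/* Grab any channel that advertises memcpy capability */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);		/* lands in ccp_tx_submit() */
	dma_async_issue_pending(chan);		/* lands in ccp_issue_pending() */

	/* Busy-wait for completion; a real client would use a callback */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_IN_PROGRESS)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}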
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index eefdf595f758..ffa2891035ac 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1427,6 +1427,70 @@ e_mask:
return ret;
}
+static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
+ struct ccp_cmd *cmd)
+{
+ struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
+ struct ccp_dm_workarea mask;
+ struct ccp_op op;
+ int ret;
+
+ if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
+ return -EINVAL;
+
+ if (!pt->src_dma || !pt->dst_dma)
+ return -EINVAL;
+
+ if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
+ if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
+ return -EINVAL;
+ if (!pt->mask)
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);
+
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+
+ if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
+ /* Load the mask */
+ op.ksb_key = cmd_q->ksb_key;
+
+ mask.length = pt->mask_len;
+ mask.dma.address = pt->mask;
+ mask.dma.length = pt->mask_len;
+
+ ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ return ret;
+ }
+ }
+
+ /* Send data to the CCP Passthru engine */
+ op.eom = 1;
+ op.soc = 1;
+
+ op.src.type = CCP_MEMTYPE_SYSTEM;
+ op.src.u.dma.address = pt->src_dma;
+ op.src.u.dma.offset = 0;
+ op.src.u.dma.length = pt->src_len;
+
+ op.dst.type = CCP_MEMTYPE_SYSTEM;
+ op.dst.u.dma.address = pt->dst_dma;
+ op.dst.u.dma.offset = 0;
+ op.dst.u.dma.length = pt->src_len;
+
+ ret = cmd_q->ccp->vdata->perform->perform_passthru(&op);
+ if (ret)
+ cmd->engine_error = cmd_q->cmd_error;
+
+ return ret;
+}
+
static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_ecc_engine *ecc = &cmd->u.ecc;
@@ -1762,7 +1826,10 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ret = ccp_run_rsa_cmd(cmd_q, cmd);
break;
case CCP_ENGINE_PASSTHRU:
- ret = ccp_run_passthru_cmd(cmd_q, cmd);
+ if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
+ ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
+ else
+ ret = ccp_run_passthru_cmd(cmd_q, cmd);
break;
case CCP_ENGINE_ECC:
ret = ccp_run_ecc_cmd(cmd_q, cmd);
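As a rough illustration of the new CCP_CMD_PASSTHRU_NO_DMA_MAP path: a caller that already holds mapped DMA addresses fills the passthru_nomap fields exactly as ccp_create_desc() does above and submits through the existing ccp_enqueue_cmd() entry point. The helper name below is hypothetical, and the command lifetime rule (it must stay allocated until the callback runs) is only noted in a comment.

#include <linux/ccp.h>
#include <linux/slab.h>

static int ccp_nomap_copy_example(dma_addr_t dst, dma_addr_t src, u64 len,
				  void (*cb)(void *data, int err), void *data)
{
	struct ccp_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->engine = CCP_ENGINE_PASSTHRU;
	cmd->flags = CCP_CMD_MAY_BACKLOG | CCP_CMD_PASSTHRU_NO_DMA_MAP;
	cmd->u.passthru_nomap.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
	cmd->u.passthru_nomap.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
	cmd->u.passthru_nomap.src_dma = src;	/* pre-mapped by the caller */
	cmd->u.passthru_nomap.dst_dma = dst;	/* pre-mapped by the caller */
	cmd->u.passthru_nomap.src_len = len;
	cmd->u.passthru_nomap.final = 1;
	cmd->callback = cb;			/* invoked on completion */
	cmd->data = data;

	/* cmd (and both buffers) must remain valid until cb() is called */
	return ccp_enqueue_cmd(cmd);
}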
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 80239ae69527..e8ef9fd24a16 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -475,18 +475,18 @@ static int mv_cesa_probe(struct platform_device *pdev)
engine->regs = cesa->regs + CESA_ENGINE_OFF(i);
if (dram && cesa->caps->has_tdma)
- mv_cesa_conf_mbus_windows(&cesa->engines[i], dram);
+ mv_cesa_conf_mbus_windows(engine, dram);
- writel(0, cesa->engines[i].regs + CESA_SA_INT_STATUS);
+ writel(0, engine->regs + CESA_SA_INT_STATUS);
writel(CESA_SA_CFG_STOP_DIG_ERR,
- cesa->engines[i].regs + CESA_SA_CFG);
+ engine->regs + CESA_SA_CFG);
writel(engine->sram_dma & CESA_SA_SRAM_MSK,
- cesa->engines[i].regs + CESA_SA_DESC_P0);
+ engine->regs + CESA_SA_DESC_P0);
ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
IRQF_ONESHOT,
dev_name(&pdev->dev),
- &cesa->engines[i]);
+ engine);
if (ret)
goto err_cleanup;
}
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 7ca2e0f9dc2e..7a5058da9151 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -768,8 +768,7 @@ static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
*len = creq->len;
memcpy(hash, creq->state, digsize);
memset(cache, 0, blocksize);
- if (creq->cache)
- memcpy(cache, creq->cache, creq->cache_ptr);
+ memcpy(cache, creq->cache, creq->cache_ptr);
return 0;
}
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 76427981275b..0ad8f1ecf175 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -99,12 +99,11 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
struct mv_cesa_tdma_desc *new_tdma = NULL;
dma_addr_t dma_handle;
- new_tdma = dma_pool_alloc(cesa_dev->dma->tdma_desc_pool, flags,
- &dma_handle);
+ new_tdma = dma_pool_zalloc(cesa_dev->dma->tdma_desc_pool, flags,
+ &dma_handle);
if (!new_tdma)
return ERR_PTR(-ENOMEM);
- memset(new_tdma, 0, sizeof(*new_tdma));
new_tdma->cur_dma = dma_handle;
if (chain->last) {
chain->last->next_dma = cpu_to_le32(dma_handle);
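The tdma change is a straight substitution: dma_pool_zalloc() returns already-zeroed pool memory, so the explicit memset goes away. Schematically (pool, flags, handle and desc are placeholders):

	/* before: allocate, then clear by hand */
	desc = dma_pool_alloc(pool, flags, &handle);
	if (desc)
		memset(desc, 0, sizeof(*desc));

	/* after: one call that hands back zeroed memory */
	desc = dma_pool_zalloc(pool, flags, &handle);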
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
new file mode 100644
index 000000000000..ff383ef83871
--- /dev/null
+++ b/drivers/crypto/mxc-scc.c
@@ -0,0 +1,765 @@
+/*
+ * Copyright (C) 2016 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
+ *
+ * The driver is based on information gathered from
+ * drivers/mxc/security/mxc_scc.c which can be found in
+ * the Freescale linux-2.6-imx.git in the imx_2.6.35_maintain branch.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+
+/* Secure Memory (SCM) registers */
+#define SCC_SCM_RED_START 0x0000
+#define SCC_SCM_BLACK_START 0x0004
+#define SCC_SCM_LENGTH 0x0008
+#define SCC_SCM_CTRL 0x000C
+#define SCC_SCM_STATUS 0x0010
+#define SCC_SCM_ERROR_STATUS 0x0014
+#define SCC_SCM_INTR_CTRL 0x0018
+#define SCC_SCM_CFG 0x001C
+#define SCC_SCM_INIT_VECTOR_0 0x0020
+#define SCC_SCM_INIT_VECTOR_1 0x0024
+#define SCC_SCM_RED_MEMORY 0x0400
+#define SCC_SCM_BLACK_MEMORY 0x0800
+
+/* Security Monitor (SMN) Registers */
+#define SCC_SMN_STATUS 0x1000
+#define SCC_SMN_COMMAND 0x1004
+#define SCC_SMN_SEQ_START 0x1008
+#define SCC_SMN_SEQ_END 0x100C
+#define SCC_SMN_SEQ_CHECK 0x1010
+#define SCC_SMN_BIT_COUNT 0x1014
+#define SCC_SMN_BITBANK_INC_SIZE 0x1018
+#define SCC_SMN_BITBANK_DECREMENT 0x101C
+#define SCC_SMN_COMPARE_SIZE 0x1020
+#define SCC_SMN_PLAINTEXT_CHECK 0x1024
+#define SCC_SMN_CIPHERTEXT_CHECK 0x1028
+#define SCC_SMN_TIMER_IV 0x102C
+#define SCC_SMN_TIMER_CONTROL 0x1030
+#define SCC_SMN_DEBUG_DETECT_STAT 0x1034
+#define SCC_SMN_TIMER 0x1038
+
+#define SCC_SCM_CTRL_START_CIPHER BIT(2)
+#define SCC_SCM_CTRL_CBC_MODE BIT(1)
+#define SCC_SCM_CTRL_DECRYPT_MODE BIT(0)
+
+#define SCC_SCM_STATUS_LEN_ERR BIT(12)
+#define SCC_SCM_STATUS_SMN_UNBLOCKED BIT(11)
+#define SCC_SCM_STATUS_CIPHERING_DONE BIT(10)
+#define SCC_SCM_STATUS_ZEROIZING_DONE BIT(9)
+#define SCC_SCM_STATUS_INTR_STATUS BIT(8)
+#define SCC_SCM_STATUS_SEC_KEY BIT(7)
+#define SCC_SCM_STATUS_INTERNAL_ERR BIT(6)
+#define SCC_SCM_STATUS_BAD_SEC_KEY BIT(5)
+#define SCC_SCM_STATUS_ZEROIZE_FAIL BIT(4)
+#define SCC_SCM_STATUS_SMN_BLOCKED BIT(3)
+#define SCC_SCM_STATUS_CIPHERING BIT(2)
+#define SCC_SCM_STATUS_ZEROIZING BIT(1)
+#define SCC_SCM_STATUS_BUSY BIT(0)
+
+#define SCC_SMN_STATUS_STATE_MASK 0x0000001F
+#define SCC_SMN_STATE_START 0x0
+/* The SMN is zeroizing its RAM during reset */
+#define SCC_SMN_STATE_ZEROIZE_RAM 0x5
+/* SMN has passed internal checks */
+#define SCC_SMN_STATE_HEALTH_CHECK 0x6
+/* Fatal Security Violation. SMN is locked, SCM is inoperative. */
+#define SCC_SMN_STATE_FAIL 0x9
+/* SCC is in secure state. SCM is using secret key. */
+#define SCC_SMN_STATE_SECURE 0xA
+/* SCC is not secure. SCM is using default key. */
+#define SCC_SMN_STATE_NON_SECURE 0xC
+
+#define SCC_SCM_INTR_CTRL_ZEROIZE_MEM BIT(2)
+#define SCC_SCM_INTR_CTRL_CLR_INTR BIT(1)
+#define SCC_SCM_INTR_CTRL_MASK_INTR BIT(0)
+
+/* Size, in blocks, of Black memory. */
+#define SCC_SCM_CFG_BLACK_SIZE_MASK 0x07fe0000
+#define SCC_SCM_CFG_BLACK_SIZE_SHIFT 17
+/* Size, in blocks, of Red memory. */
+#define SCC_SCM_CFG_RED_SIZE_MASK 0x0001ff80
+#define SCC_SCM_CFG_RED_SIZE_SHIFT 7
+/* Number of bytes per block. */
+#define SCC_SCM_CFG_BLOCK_SIZE_MASK 0x0000007f
+
+#define SCC_SMN_COMMAND_TAMPER_LOCK BIT(4)
+#define SCC_SMN_COMMAND_CLR_INTR BIT(3)
+#define SCC_SMN_COMMAND_CLR_BIT_BANK BIT(2)
+#define SCC_SMN_COMMAND_EN_INTR BIT(1)
+#define SCC_SMN_COMMAND_SET_SOFTWARE_ALARM BIT(0)
+
+#define SCC_KEY_SLOTS 20
+#define SCC_MAX_KEY_SIZE 32
+#define SCC_KEY_SLOT_SIZE 32
+
+#define SCC_CRC_CCITT_START 0xFFFF
+
+/*
+ * Offset into each RAM of the base of the area which is not
+ * used for Stored Keys.
+ */
+#define SCC_NON_RESERVED_OFFSET (SCC_KEY_SLOTS * SCC_KEY_SLOT_SIZE)
+
+/* Fixed padding for appending to plaintext to fill out a block */
+static char scc_block_padding[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0 };
+
+enum mxc_scc_state {
+ SCC_STATE_OK,
+ SCC_STATE_UNIMPLEMENTED,
+ SCC_STATE_FAILED
+};
+
+struct mxc_scc {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ bool hw_busy;
+ spinlock_t lock;
+ struct crypto_queue queue;
+ struct crypto_async_request *req;
+ int block_size_bytes;
+ int black_ram_size_blocks;
+ int memory_size_bytes;
+ int bytes_remaining;
+
+ void __iomem *red_memory;
+ void __iomem *black_memory;
+};
+
+struct mxc_scc_ctx {
+ struct mxc_scc *scc;
+ struct scatterlist *sg_src;
+ size_t src_nents;
+ struct scatterlist *sg_dst;
+ size_t dst_nents;
+ unsigned int offset;
+ unsigned int size;
+ unsigned int ctrl;
+};
+
+struct mxc_scc_crypto_tmpl {
+ struct mxc_scc *scc;
+ struct crypto_alg alg;
+};
+
+static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
+ struct crypto_async_request *req)
+{
+ struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+ struct mxc_scc *scc = ctx->scc;
+ size_t len;
+ void __iomem *from;
+
+ if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
+ from = scc->red_memory;
+ else
+ from = scc->black_memory;
+
+ dev_dbg(scc->dev, "pcopy: from 0x%p %d bytes\n", from,
+ ctx->dst_nents * 8);
+ len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
+ from, ctx->size, ctx->offset);
+ if (!len) {
+ dev_err(scc->dev, "pcopy err from 0x%p (len=%d)\n", from, len);
+ return -EINVAL;
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "red memory@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ scc->red_memory, ctx->size, 1);
+ print_hex_dump(KERN_ERR,
+ "black memory@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ scc->black_memory, ctx->size, 1);
+#endif
+
+ ctx->offset += len;
+
+ if (ctx->offset < ablkreq->nbytes)
+ return -EINPROGRESS;
+
+ return 0;
+}
+
+static int mxc_scc_ablkcipher_req_init(struct ablkcipher_request *req,
+ struct mxc_scc_ctx *ctx)
+{
+ struct mxc_scc *scc = ctx->scc;
+ int nents;
+
+ nents = sg_nents_for_len(req->src, req->nbytes);
+ if (nents < 0) {
+ dev_err(scc->dev, "Invalid number of src SC");
+ return nents;
+ }
+ ctx->src_nents = nents;
+
+ nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (nents < 0) {
+ dev_err(scc->dev, "Invalid number of dst SC");
+ return nents;
+ }
+ ctx->dst_nents = nents;
+
+ ctx->size = 0;
+ ctx->offset = 0;
+
+ return 0;
+}
+
+static int mxc_scc_ablkcipher_req_complete(struct crypto_async_request *req,
+ struct mxc_scc_ctx *ctx,
+ int result)
+{
+ struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+ struct mxc_scc *scc = ctx->scc;
+
+ scc->req = NULL;
+ scc->bytes_remaining = scc->memory_size_bytes;
+
+ if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE)
+ memcpy(ablkreq->info, scc->base + SCC_SCM_INIT_VECTOR_0,
+ scc->block_size_bytes);
+
+ req->complete(req, result);
+ scc->hw_busy = false;
+
+ return 0;
+}
+
+static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
+ struct ablkcipher_request *req)
+{
+ u8 padding_buffer[sizeof(u16) + sizeof(scc_block_padding)];
+ size_t len = min_t(size_t, req->nbytes - ctx->offset,
+ ctx->scc->bytes_remaining);
+ unsigned int padding_byte_count = 0;
+ struct mxc_scc *scc = ctx->scc;
+ void __iomem *to;
+
+ if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
+ to = scc->black_memory;
+ else
+ to = scc->red_memory;
+
+ if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE && req->info)
+ memcpy(scc->base + SCC_SCM_INIT_VECTOR_0, req->info,
+ scc->block_size_bytes);
+
+ len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
+ to, len, ctx->offset);
+ if (!len) {
+ dev_err(scc->dev, "pcopy err to 0x%p (len=%d)\n", to, len);
+ return -EINVAL;
+ }
+
+ ctx->size = len;
+
+#ifdef DEBUG
+ dev_dbg(scc->dev, "copied %d bytes to 0x%p\n", len, to);
+ print_hex_dump(KERN_ERR,
+ "init vector0@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ scc->base + SCC_SCM_INIT_VECTOR_0, scc->block_size_bytes,
+ 1);
+ print_hex_dump(KERN_ERR,
+ "red memory@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ scc->red_memory, ctx->size, 1);
+ print_hex_dump(KERN_ERR,
+ "black memory@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ scc->black_memory, ctx->size, 1);
+#endif
+
+ scc->bytes_remaining -= len;
+
+ padding_byte_count = len % scc->block_size_bytes;
+
+ if (padding_byte_count) {
+ memcpy(padding_buffer, scc_block_padding, padding_byte_count);
+ memcpy(to + len, padding_buffer, padding_byte_count);
+ ctx->size += padding_byte_count;
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "data to encrypt@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ to, ctx->size, 1);
+#endif
+
+ return 0;
+}
+
+static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
+ struct crypto_async_request *req)
+{
+ struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+ struct mxc_scc *scc = ctx->scc;
+ int err;
+
+ dev_dbg(scc->dev, "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
+ ablkreq->nbytes, ablkreq->src, ablkreq->dst);
+
+ writel(0, scc->base + SCC_SCM_ERROR_STATUS);
+
+ err = mxc_scc_put_data(ctx, ablkreq);
+ if (err) {
+ mxc_scc_ablkcipher_req_complete(req, ctx, err);
+ return;
+ }
+
+ dev_dbg(scc->dev, "Start encryption (0x%p/0x%p)\n",
+ (void *)readl(scc->base + SCC_SCM_RED_START),
+ (void *)readl(scc->base + SCC_SCM_BLACK_START));
+
+ /* clear interrupt control registers */
+ writel(SCC_SCM_INTR_CTRL_CLR_INTR,
+ scc->base + SCC_SCM_INTR_CTRL);
+
+ writel((ctx->size / ctx->scc->block_size_bytes) - 1,
+ scc->base + SCC_SCM_LENGTH);
+
+ dev_dbg(scc->dev, "Process %d block(s) in 0x%p\n",
+ ctx->size / ctx->scc->block_size_bytes,
+ (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE) ? scc->black_memory :
+ scc->red_memory);
+
+ writel(ctx->ctrl, scc->base + SCC_SCM_CTRL);
+}
+
+static irqreturn_t mxc_scc_int(int irq, void *priv)
+{
+ struct crypto_async_request *req;
+ struct mxc_scc_ctx *ctx;
+ struct mxc_scc *scc = priv;
+ int status;
+ int ret;
+
+ status = readl(scc->base + SCC_SCM_STATUS);
+
+ /* clear interrupt control registers */
+ writel(SCC_SCM_INTR_CTRL_CLR_INTR, scc->base + SCC_SCM_INTR_CTRL);
+
+ if (status & SCC_SCM_STATUS_BUSY)
+ return IRQ_NONE;
+
+ req = scc->req;
+ if (req) {
+ ctx = crypto_tfm_ctx(req->tfm);
+ ret = mxc_scc_get_data(ctx, req);
+ if (ret != -EINPROGRESS)
+ mxc_scc_ablkcipher_req_complete(req, ctx, ret);
+ else
+ mxc_scc_ablkcipher_next(ctx, req);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mxc_scc_cra_init(struct crypto_tfm *tfm)
+{
+ struct mxc_scc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct mxc_scc_crypto_tmpl *algt;
+
+ algt = container_of(alg, struct mxc_scc_crypto_tmpl, alg);
+
+ ctx->scc = algt->scc;
+ return 0;
+}
+
+static void mxc_scc_dequeue_req_unlocked(struct mxc_scc_ctx *ctx)
+{
+ struct crypto_async_request *req, *backlog;
+
+ if (ctx->scc->hw_busy)
+ return;
+
+ spin_lock_bh(&ctx->scc->lock);
+ backlog = crypto_get_backlog(&ctx->scc->queue);
+ req = crypto_dequeue_request(&ctx->scc->queue);
+ ctx->scc->req = req;
+ ctx->scc->hw_busy = true;
+ spin_unlock_bh(&ctx->scc->lock);
+
+ if (!req)
+ return;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ mxc_scc_ablkcipher_next(ctx, req);
+}
+
+static int mxc_scc_queue_req(struct mxc_scc_ctx *ctx,
+ struct crypto_async_request *req)
+{
+ int ret;
+
+ spin_lock_bh(&ctx->scc->lock);
+ ret = crypto_enqueue_request(&ctx->scc->queue, req);
+ spin_unlock_bh(&ctx->scc->lock);
+
+ if (ret != -EINPROGRESS)
+ return ret;
+
+ mxc_scc_dequeue_req_unlocked(ctx);
+
+ return -EINPROGRESS;
+}
+
+static int mxc_scc_des3_op(struct mxc_scc_ctx *ctx,
+ struct ablkcipher_request *req)
+{
+ int err;
+
+ err = mxc_scc_ablkcipher_req_init(req, ctx);
+ if (err)
+ return err;
+
+ return mxc_scc_queue_req(ctx, &req->base);
+}
+
+static int mxc_scc_ecb_des_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+ struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+
+ return mxc_scc_des3_op(ctx, req);
+}
+
+static int mxc_scc_ecb_des_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+ struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+ ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
+
+ return mxc_scc_des3_op(ctx, req);
+}
+
+static int mxc_scc_cbc_des_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+ struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+ ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
+
+ return mxc_scc_des3_op(ctx, req);
+}
+
+static int mxc_scc_cbc_des_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+ struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+ ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
+ ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
+
+ return mxc_scc_des3_op(ctx, req);
+}
+
+static void mxc_scc_hw_init(struct mxc_scc *scc)
+{
+ int offset;
+
+ offset = SCC_NON_RESERVED_OFFSET / scc->block_size_bytes;
+
+ /* Fill the RED_START register */
+ writel(offset, scc->base + SCC_SCM_RED_START);
+
+ /* Fill the BLACK_START register */
+ writel(offset, scc->base + SCC_SCM_BLACK_START);
+
+ scc->red_memory = scc->base + SCC_SCM_RED_MEMORY +
+ SCC_NON_RESERVED_OFFSET;
+
+ scc->black_memory = scc->base + SCC_SCM_BLACK_MEMORY +
+ SCC_NON_RESERVED_OFFSET;
+
+ scc->bytes_remaining = scc->memory_size_bytes;
+}
+
+static int mxc_scc_get_config(struct mxc_scc *scc)
+{
+ int config;
+
+ config = readl(scc->base + SCC_SCM_CFG);
+
+ scc->block_size_bytes = config & SCC_SCM_CFG_BLOCK_SIZE_MASK;
+
+ scc->black_ram_size_blocks = config & SCC_SCM_CFG_BLACK_SIZE_MASK;
+
+ scc->memory_size_bytes = (scc->block_size_bytes *
+ scc->black_ram_size_blocks) -
+ SCC_NON_RESERVED_OFFSET;
+
+ return 0;
+}
+
+static enum mxc_scc_state mxc_scc_get_state(struct mxc_scc *scc)
+{
+ enum mxc_scc_state state;
+ int status;
+
+ status = readl(scc->base + SCC_SMN_STATUS) &
+ SCC_SMN_STATUS_STATE_MASK;
+
+ /* If in Health Check, try to bring it up to the secure state */
+ if (status & SCC_SMN_STATE_HEALTH_CHECK) {
+ /*
+ * Write a simple algorithm to the Algorithm Sequence
+ * Checker (ASC)
+ */
+ writel(0xaaaa, scc->base + SCC_SMN_SEQ_START);
+ writel(0x5555, scc->base + SCC_SMN_SEQ_END);
+ writel(0x5555, scc->base + SCC_SMN_SEQ_CHECK);
+
+ status = readl(scc->base + SCC_SMN_STATUS) &
+ SCC_SMN_STATUS_STATE_MASK;
+ }
+
+ switch (status) {
+ case SCC_SMN_STATE_NON_SECURE:
+ case SCC_SMN_STATE_SECURE:
+ state = SCC_STATE_OK;
+ break;
+ case SCC_SMN_STATE_FAIL:
+ state = SCC_STATE_FAILED;
+ break;
+ default:
+ state = SCC_STATE_UNIMPLEMENTED;
+ break;
+ }
+
+ return state;
+}
+
+static struct mxc_scc_crypto_tmpl scc_ecb_des = {
+ .alg = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-scc",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mxc_scc_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mxc_scc_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .encrypt = mxc_scc_ecb_des_encrypt,
+ .decrypt = mxc_scc_ecb_des_decrypt,
+ }
+ }
+};
+
+static struct mxc_scc_crypto_tmpl scc_cbc_des = {
+ .alg = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3-scc",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mxc_scc_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mxc_scc_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .encrypt = mxc_scc_cbc_des_encrypt,
+ .decrypt = mxc_scc_cbc_des_decrypt,
+ }
+ }
+};
+
+static struct mxc_scc_crypto_tmpl *scc_crypto_algs[] = {
+ &scc_ecb_des,
+ &scc_cbc_des,
+};
+
+static int mxc_scc_crypto_register(struct mxc_scc *scc)
+{
+ int i;
+ int err = 0;
+
+ for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++) {
+ scc_crypto_algs[i]->scc = scc;
+ err = crypto_register_alg(&scc_crypto_algs[i]->alg);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ while (--i >= 0)
+ crypto_unregister_alg(&scc_crypto_algs[i]->alg);
+
+ return err;
+}
+
+static void mxc_scc_crypto_unregister(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++)
+ crypto_unregister_alg(&scc_crypto_algs[i]->alg);
+}
+
+static int mxc_scc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct mxc_scc *scc;
+ enum mxc_scc_state state;
+ int irq;
+ int ret;
+ int i;
+
+ scc = devm_kzalloc(dev, sizeof(*scc), GFP_KERNEL);
+ if (!scc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ scc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(scc->base))
+ return PTR_ERR(scc->base);
+
+ scc->clk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(scc->clk)) {
+ dev_err(dev, "Could not get ipg clock\n");
+ return PTR_ERR(scc->clk);
+ }
+
+ clk_prepare_enable(scc->clk);
+
+ /* clear error status register */
+ writel(0x0, scc->base + SCC_SCM_ERROR_STATUS);
+
+ /* clear interrupt control registers */
+ writel(SCC_SCM_INTR_CTRL_CLR_INTR |
+ SCC_SCM_INTR_CTRL_MASK_INTR,
+ scc->base + SCC_SCM_INTR_CTRL);
+
+ writel(SCC_SMN_COMMAND_CLR_INTR |
+ SCC_SMN_COMMAND_EN_INTR,
+ scc->base + SCC_SMN_COMMAND);
+
+ scc->dev = dev;
+ platform_set_drvdata(pdev, scc);
+
+ ret = mxc_scc_get_config(scc);
+ if (ret)
+ goto err_out;
+
+ state = mxc_scc_get_state(scc);
+
+ if (state != SCC_STATE_OK) {
+ dev_err(dev, "SCC in unusable state %d\n", state);
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ mxc_scc_hw_init(scc);
+
+ spin_lock_init(&scc->lock);
+ /* FIXME: calculate queue from RAM slots */
+ crypto_init_queue(&scc->queue, 50);
+
+ for (i = 0; i < 2; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ dev_err(dev, "failed to get irq resource\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, mxc_scc_int,
+ IRQF_ONESHOT, dev_name(dev), scc);
+ if (ret)
+ goto err_out;
+ }
+
+ ret = mxc_scc_crypto_register(scc);
+ if (ret) {
+ dev_err(dev, "could not register algorithms");
+ goto err_out;
+ }
+
+ dev_info(dev, "registered successfully.\n");
+
+ return 0;
+
+err_out:
+ clk_disable_unprepare(scc->clk);
+
+ return ret;
+}
+
+static int mxc_scc_remove(struct platform_device *pdev)
+{
+ struct mxc_scc *scc = platform_get_drvdata(pdev);
+
+ mxc_scc_crypto_unregister();
+
+ clk_disable_unprepare(scc->clk);
+
+ return 0;
+}
+
+static const struct of_device_id mxc_scc_dt_ids[] = {
+ { .compatible = "fsl,imx25-scc", .data = NULL, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxc_scc_dt_ids);
+
+static struct platform_driver mxc_scc_driver = {
+ .probe = mxc_scc_probe,
+ .remove = mxc_scc_remove,
+ .driver = {
+ .name = "mxc-scc",
+ .of_match_table = mxc_scc_dt_ids,
+ },
+};
+
+module_platform_driver(mxc_scc_driver);
+MODULE_AUTHOR("Steffen Trumtrar <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Freescale i.MX25 SCC Crypto driver");
+MODULE_LICENSE("GPL v2");
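A sketch of how an in-kernel user might drive one of the ciphers this driver registers, using the ablkcipher API of this kernel generation. The SCC operates on its internal secret (or default) key, so the algorithms above expose no setkey; key handling is therefore omitted, completion waiting is only noted, and the function name is illustrative.

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int mxc_scc_cbc_example(struct scatterlist *src,
			       struct scatterlist *dst,
			       unsigned int nbytes, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(des3_ede)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ablkcipher(tfm);
		return -ENOMEM;
	}

	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
	ret = crypto_ablkcipher_encrypt(req);
	/*
	 * -EINPROGRESS / -EBUSY mean the request was queued; a real caller
	 * sets a completion callback and must not free req before it runs.
	 */

	ablkcipher_request_free(req);
	crypto_free_ablkcipher(tfm);
	return ret;
}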
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index b85a7a7dbf63..c5aac25a5738 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1598,7 +1598,7 @@ static void *new_queue(unsigned long q_type)
static void free_queue(void *p, unsigned long q_type)
{
- return kmem_cache_free(queue_cache[q_type - 1], p);
+ kmem_cache_free(queue_cache[q_type - 1], p);
}
static int queue_cache_init(void)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index d420ec751c7c..ce174d3b842c 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -26,7 +26,6 @@
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -176,9 +175,7 @@ struct omap_aes_dev {
struct scatter_walk in_walk;
struct scatter_walk out_walk;
- int dma_in;
struct dma_chan *dma_lch_in;
- int dma_out;
struct dma_chan *dma_lch_out;
int in_sg_len;
int out_sg_len;
@@ -351,30 +348,21 @@ static void omap_aes_dma_out_callback(void *data)
static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
- int err = -ENOMEM;
- dma_cap_mask_t mask;
+ int err;
dd->dma_lch_out = NULL;
dd->dma_lch_in = NULL;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- dd->dma_lch_in = dma_request_slave_channel_compat(mask,
- omap_dma_filter_fn,
- &dd->dma_in,
- dd->dev, "rx");
- if (!dd->dma_lch_in) {
+ dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
+ if (IS_ERR(dd->dma_lch_in)) {
dev_err(dd->dev, "Unable to request in DMA channel\n");
- goto err_dma_in;
+ return PTR_ERR(dd->dma_lch_in);
}
- dd->dma_lch_out = dma_request_slave_channel_compat(mask,
- omap_dma_filter_fn,
- &dd->dma_out,
- dd->dev, "tx");
- if (!dd->dma_lch_out) {
+ dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_lch_out)) {
dev_err(dd->dev, "Unable to request out DMA channel\n");
+ err = PTR_ERR(dd->dma_lch_out);
goto err_dma_out;
}
@@ -382,14 +370,15 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
err_dma_out:
dma_release_channel(dd->dma_lch_in);
-err_dma_in:
- if (err)
- pr_err("error: %d\n", err);
+
return err;
}
static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
+ if (dd->pio_only)
+ return;
+
dma_release_channel(dd->dma_lch_out);
dma_release_channel(dd->dma_lch_in);
}
@@ -1080,9 +1069,6 @@ static int omap_aes_get_res_of(struct omap_aes_dev *dd,
goto err;
}
- dd->dma_out = -1; /* Dummy value that's unused */
- dd->dma_in = -1; /* Dummy value that's unused */
-
dd->pdata = match->data;
err:
@@ -1116,24 +1102,6 @@ static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
}
memcpy(res, r, sizeof(*res));
- /* Get the DMA out channel */
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!r) {
- dev_err(dev, "no DMA out resource info\n");
- err = -ENODEV;
- goto err;
- }
- dd->dma_out = r->start;
-
- /* Get the DMA in channel */
- r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!r) {
- dev_err(dev, "no DMA in resource info\n");
- err = -ENODEV;
- goto err;
- }
- dd->dma_in = r->start;
-
/* Only OMAP2/3 can be non-DT */
dd->pdata = &omap_aes_pdata_omap2;
@@ -1191,7 +1159,9 @@ static int omap_aes_probe(struct platform_device *pdev)
tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
err = omap_aes_dma_init(dd);
- if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
+ if (err == -EPROBE_DEFER) {
+ goto err_irq;
+ } else if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
dd->pio_only = 1;
irq = platform_get_irq(pdev, 0);
@@ -1248,8 +1218,8 @@ err_algs:
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
crypto_unregister_alg(
&dd->pdata->algs_info[i].algs_list[j]);
- if (!dd->pio_only)
- omap_aes_dma_cleanup(dd);
+
+ omap_aes_dma_cleanup(dd);
err_irq:
tasklet_kill(&dd->done_task);
pm_runtime_disable(dev);
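The same conversion recurs below in omap-des and omap-sham: dma_request_chan() returns an ERR_PTR instead of NULL on failure, so the drivers switch from NULL checks to IS_ERR()/PTR_ERR() and can now propagate -EPROBE_DEFER from probe. In schematic form (names are generic):

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan)) {
		err = PTR_ERR(chan);
		if (err == -EPROBE_DEFER)
			return err;	/* DMA provider not ready yet; retry probe later */
		/* otherwise fall back to PIO or fail the probe */
	}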
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index dd7b93f2f94c..3eedb03111ba 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -29,7 +29,6 @@
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -39,6 +38,7 @@
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
+#include <crypto/algapi.h>
#define DST_MAXBURST 2
@@ -132,14 +132,10 @@ struct omap_des_dev {
unsigned long flags;
int err;
- /* spinlock used for queues */
- spinlock_t lock;
- struct crypto_queue queue;
-
struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
struct ablkcipher_request *req;
+ struct crypto_engine *engine;
/*
* total is used by PIO mode for book keeping so introduce
* variable total_save as need it to calc page_order
@@ -158,9 +154,7 @@ struct omap_des_dev {
struct scatter_walk in_walk;
struct scatter_walk out_walk;
- int dma_in;
struct dma_chan *dma_lch_in;
- int dma_out;
struct dma_chan *dma_lch_out;
int in_sg_len;
int out_sg_len;
@@ -340,30 +334,21 @@ static void omap_des_dma_out_callback(void *data)
static int omap_des_dma_init(struct omap_des_dev *dd)
{
- int err = -ENOMEM;
- dma_cap_mask_t mask;
+ int err;
dd->dma_lch_out = NULL;
dd->dma_lch_in = NULL;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- dd->dma_lch_in = dma_request_slave_channel_compat(mask,
- omap_dma_filter_fn,
- &dd->dma_in,
- dd->dev, "rx");
- if (!dd->dma_lch_in) {
+ dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
+ if (IS_ERR(dd->dma_lch_in)) {
dev_err(dd->dev, "Unable to request in DMA channel\n");
- goto err_dma_in;
+ return PTR_ERR(dd->dma_lch_in);
}
- dd->dma_lch_out = dma_request_slave_channel_compat(mask,
- omap_dma_filter_fn,
- &dd->dma_out,
- dd->dev, "tx");
- if (!dd->dma_lch_out) {
+ dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_lch_out)) {
dev_err(dd->dev, "Unable to request out DMA channel\n");
+ err = PTR_ERR(dd->dma_lch_out);
goto err_dma_out;
}
@@ -371,14 +356,15 @@ static int omap_des_dma_init(struct omap_des_dev *dd)
err_dma_out:
dma_release_channel(dd->dma_lch_in);
-err_dma_in:
- if (err)
- pr_err("error: %d\n", err);
+
return err;
}
static void omap_des_dma_cleanup(struct omap_des_dev *dd)
{
+ if (dd->pio_only)
+ return;
+
dma_release_channel(dd->dma_lch_out);
dma_release_channel(dd->dma_lch_in);
}
@@ -520,9 +506,7 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err)
pr_debug("err: %d\n", err);
pm_runtime_put(dd->dev);
- dd->flags &= ~FLAGS_BUSY;
-
- req->base.complete(&req->base, err);
+ crypto_finalize_request(dd->engine, req, err);
}
static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
@@ -585,34 +569,24 @@ static int omap_des_copy_sgs(struct omap_des_dev *dd)
}
static int omap_des_handle_queue(struct omap_des_dev *dd,
- struct ablkcipher_request *req)
+ struct ablkcipher_request *req)
{
- struct crypto_async_request *async_req, *backlog;
- struct omap_des_ctx *ctx;
- struct omap_des_reqctx *rctx;
- unsigned long flags;
- int err, ret = 0;
-
- spin_lock_irqsave(&dd->lock, flags);
if (req)
- ret = ablkcipher_enqueue_request(&dd->queue, req);
- if (dd->flags & FLAGS_BUSY) {
- spin_unlock_irqrestore(&dd->lock, flags);
- return ret;
- }
- backlog = crypto_get_backlog(&dd->queue);
- async_req = crypto_dequeue_request(&dd->queue);
- if (async_req)
- dd->flags |= FLAGS_BUSY;
- spin_unlock_irqrestore(&dd->lock, flags);
+ return crypto_transfer_request_to_engine(dd->engine, req);
- if (!async_req)
- return ret;
+ return 0;
+}
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+static int omap_des_prepare_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct omap_des_dev *dd = omap_des_find_dev(ctx);
+ struct omap_des_reqctx *rctx;
- req = ablkcipher_request_cast(async_req);
+ if (!dd)
+ return -ENODEV;
/* assign new request to device */
dd->req = req;
@@ -642,16 +616,20 @@ static int omap_des_handle_queue(struct omap_des_dev *dd,
dd->ctx = ctx;
ctx->dd = dd;
- err = omap_des_write_ctrl(dd);
- if (!err)
- err = omap_des_crypt_dma_start(dd);
- if (err) {
- /* des_task will not finish it, so do it here */
- omap_des_finish_req(dd, err);
- tasklet_schedule(&dd->queue_task);
- }
+ return omap_des_write_ctrl(dd);
+}
+
+static int omap_des_crypt_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct omap_des_dev *dd = omap_des_find_dev(ctx);
+
+ if (!dd)
+ return -ENODEV;
- return ret; /* return ret, which is enqueue return value */
+ return omap_des_crypt_dma_start(dd);
}
static void omap_des_done_task(unsigned long data)
@@ -683,18 +661,10 @@ static void omap_des_done_task(unsigned long data)
}
omap_des_finish_req(dd, 0);
- omap_des_handle_queue(dd, NULL);
pr_debug("exit\n");
}
-static void omap_des_queue_task(unsigned long data)
-{
- struct omap_des_dev *dd = (struct omap_des_dev *)data;
-
- omap_des_handle_queue(dd, NULL);
-}
-
static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode)
{
struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
@@ -999,8 +969,6 @@ static int omap_des_get_of(struct omap_des_dev *dd,
return -EINVAL;
}
- dd->dma_out = -1; /* Dummy value that's unused */
- dd->dma_in = -1; /* Dummy value that's unused */
dd->pdata = match->data;
return 0;
@@ -1016,33 +984,10 @@ static int omap_des_get_of(struct omap_des_dev *dd,
static int omap_des_get_pdev(struct omap_des_dev *dd,
struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct resource *r;
- int err = 0;
-
- /* Get the DMA out channel */
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!r) {
- dev_err(dev, "no DMA out resource info\n");
- err = -ENODEV;
- goto err;
- }
- dd->dma_out = r->start;
-
- /* Get the DMA in channel */
- r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!r) {
- dev_err(dev, "no DMA in resource info\n");
- err = -ENODEV;
- goto err;
- }
- dd->dma_in = r->start;
-
/* non-DT devices get pdata from pdev */
dd->pdata = pdev->dev.platform_data;
-err:
- return err;
+ return 0;
}
static int omap_des_probe(struct platform_device *pdev)
@@ -1062,9 +1007,6 @@ static int omap_des_probe(struct platform_device *pdev)
dd->dev = dev;
platform_set_drvdata(pdev, dd);
- spin_lock_init(&dd->lock);
- crypto_init_queue(&dd->queue, OMAP_DES_QUEUE_LENGTH);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "no MEM resource info\n");
@@ -1103,10 +1045,11 @@ static int omap_des_probe(struct platform_device *pdev)
(reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
tasklet_init(&dd->done_task, omap_des_done_task, (unsigned long)dd);
- tasklet_init(&dd->queue_task, omap_des_queue_task, (unsigned long)dd);
err = omap_des_dma_init(dd);
- if (err && DES_REG_IRQ_STATUS(dd) && DES_REG_IRQ_ENABLE(dd)) {
+ if (err == -EPROBE_DEFER) {
+ goto err_irq;
+ } else if (err && DES_REG_IRQ_STATUS(dd) && DES_REG_IRQ_ENABLE(dd)) {
dd->pio_only = 1;
irq = platform_get_irq(pdev, 0);
@@ -1144,17 +1087,30 @@ static int omap_des_probe(struct platform_device *pdev)
}
}
+ /* Initialize des crypto engine */
+ dd->engine = crypto_engine_alloc_init(dev, 1);
+ if (!dd->engine)
+ goto err_algs;
+
+ dd->engine->prepare_request = omap_des_prepare_req;
+ dd->engine->crypt_one_request = omap_des_crypt_req;
+ err = crypto_engine_start(dd->engine);
+ if (err)
+ goto err_engine;
+
return 0;
+
+err_engine:
+ crypto_engine_exit(dd->engine);
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
crypto_unregister_alg(
&dd->pdata->algs_info[i].algs_list[j]);
- if (!dd->pio_only)
- omap_des_dma_cleanup(dd);
+
+ omap_des_dma_cleanup(dd);
err_irq:
tasklet_kill(&dd->done_task);
- tasklet_kill(&dd->queue_task);
err_get:
pm_runtime_disable(dev);
err_res:
@@ -1182,7 +1138,6 @@ static int omap_des_remove(struct platform_device *pdev)
&dd->pdata->algs_info[i].algs_list[j]);
tasklet_kill(&dd->done_task);
- tasklet_kill(&dd->queue_task);
omap_des_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
dd = NULL;
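In outline, the omap-des conversion above swaps the driver's private crypto_queue for the crypto engine framework; condensed from the hunks above, with error paths trimmed, the wiring looks like this:

	/* probe: allocate and start the engine (second argument requests a realtime kthread) */
	dd->engine = crypto_engine_alloc_init(dev, 1);
	if (!dd->engine)
		return -ENOMEM;
	dd->engine->prepare_request = omap_des_prepare_req;
	dd->engine->crypt_one_request = omap_des_crypt_req;
	err = crypto_engine_start(dd->engine);

	/* request path: hand the request to the engine instead of a local list */
	return crypto_transfer_request_to_engine(dd->engine, req);

	/* completion path (done tasklet): let the engine finalize and pump the next request */
	crypto_finalize_request(dd->engine, req, err);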
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 48adb2a0903e..6eefaa2fe58f 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -29,7 +29,6 @@
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -219,7 +218,6 @@ struct omap_sham_dev {
int irq;
spinlock_t lock;
int err;
- unsigned int dma;
struct dma_chan *dma_lch;
struct tasklet_struct done_task;
u8 polling_mode;
@@ -1842,7 +1840,6 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd,
goto err;
}
- dd->dma = -1; /* Dummy value that's unused */
dd->pdata = match->data;
err:
@@ -1884,15 +1881,6 @@ static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
goto err;
}
- /* Get the DMA */
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!r) {
- dev_err(dev, "no DMA resource info\n");
- err = -ENODEV;
- goto err;
- }
- dd->dma = r->start;
-
/* Only OMAP2/3 can be non-DT */
dd->pdata = &omap_sham_pdata_omap2;
@@ -1946,9 +1934,12 @@ static int omap_sham_probe(struct platform_device *pdev)
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
- &dd->dma, dev, "rx");
- if (!dd->dma_lch) {
+ dd->dma_lch = dma_request_chan(dev, "rx");
+ if (IS_ERR(dd->dma_lch)) {
+ err = PTR_ERR(dd->dma_lch);
+ if (err == -EPROBE_DEFER)
+ goto data_err;
+
dd->polling_mode = 1;
dev_dbg(dev, "using polling mode instead of dma\n");
}
@@ -1995,7 +1986,7 @@ err_algs:
&dd->pdata->algs_info[i].algs_list[j]);
err_pm:
pm_runtime_disable(dev);
- if (dd->dma_lch)
+ if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
data_err:
dev_err(dev, "initialization failed.\n");
@@ -2021,7 +2012,7 @@ static int omap_sham_remove(struct platform_device *pdev)
tasklet_kill(&dd->done_task);
pm_runtime_disable(&pdev->dev);
- if (dd->dma_lch)
+ if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
return 0;
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index e13bd08ddd1e..640c3fc870fd 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -300,9 +300,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- if (adf_dev_stop(accel_dev))
- dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+ adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_disable_aer(accel_dev);
adf_cleanup_accel(accel_dev);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index 1af321c2ce1a..d2d0ae445fd8 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -109,29 +109,6 @@ static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{
}
-static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (adf_iov_putmsg(accel_dev, msg, 0)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Init event to PF\n");
- return -EFAULT;
- }
- return 0;
-}
-
-static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (adf_iov_putmsg(accel_dev, msg, 0))
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Shutdown event to PF\n");
-}
-
void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &c3xxxiov_class;
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 1ac4ae90e072..949d77b79fbe 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -238,6 +238,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
@@ -270,9 +272,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- if (adf_dev_stop(accel_dev))
- dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+ adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 512c56509718..bc5cbc193aae 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -300,9 +300,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- if (adf_dev_stop(accel_dev))
- dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+ adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_disable_aer(accel_dev);
adf_cleanup_accel(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index baf4b509c892..38e4bc04f407 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -109,29 +109,6 @@ static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{
}
-static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (adf_iov_putmsg(accel_dev, msg, 0)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Init event to PF\n");
- return -EFAULT;
- }
- return 0;
-}
-
-static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (adf_iov_putmsg(accel_dev, msg, 0))
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Shutdown event to PF\n");
-}
-
void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &c62xiov_class;
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index d2e4b928f3be..7540ce13b0d0 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -238,6 +238,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
@@ -270,9 +272,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- if (adf_dev_stop(accel_dev))
- dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+ adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 29c7c53d2845..6d74b91f2152 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -9,7 +9,6 @@ clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
intel_qat-objs := adf_cfg.o \
adf_isr.o \
- adf_vf_isr.o \
adf_ctl_drv.o \
adf_dev_mgr.o \
adf_init.o \
@@ -27,4 +26,5 @@ intel_qat-objs := adf_cfg.o \
qat_hal.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
-intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o \
+ adf_vf2pf_msg.o adf_vf_isr.o
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index eb557f69e367..ce7c4626c983 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -61,7 +61,7 @@
#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
#define ADF_ADMINMSG_LEN 32
-static const u8 const_tab[1024] = {
+static const u8 const_tab[1024] __aligned(1024) = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
index 13575111382c..7632ed0f25c5 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -57,10 +57,8 @@
#define ADF_RING_DC_SIZE "NumConcurrentRequests"
#define ADF_RING_ASYM_TX "RingAsymTx"
#define ADF_RING_SYM_TX "RingSymTx"
-#define ADF_RING_RND_TX "RingNrbgTx"
#define ADF_RING_ASYM_RX "RingAsymRx"
#define ADF_RING_SYM_RX "RingSymRx"
-#define ADF_RING_RND_RX "RingNrbgRx"
#define ADF_RING_DC_TX "RingTx"
#define ADF_RING_DC_RX "RingRx"
#define ADF_ETRMGR_BANK "Bank"
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 976b01e58afb..75faa39bc8d0 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -67,7 +67,7 @@
#define ADF_STATUS_AE_INITIALISED 4
#define ADF_STATUS_AE_UCODE_LOADED 5
#define ADF_STATUS_AE_STARTED 6
-#define ADF_STATUS_ORPHAN_TH_RUNNING 7
+#define ADF_STATUS_PF_RUNNING 7
#define ADF_STATUS_IRQ_ALLOCATED 8
enum adf_dev_reset_mode {
@@ -103,7 +103,7 @@ int adf_service_unregister(struct service_hndl *service);
int adf_dev_init(struct adf_accel_dev *accel_dev);
int adf_dev_start(struct adf_accel_dev *accel_dev);
-int adf_dev_stop(struct adf_accel_dev *accel_dev);
+void adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
@@ -236,8 +236,13 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
uint32_t vf_mask);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+
+int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
int adf_init_pf_wq(void);
void adf_exit_pf_wq(void);
+int adf_init_vf_wq(void);
+void adf_exit_vf_wq(void);
#else
static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
@@ -256,6 +261,15 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
}
+static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+
static inline int adf_init_pf_wq(void)
{
return 0;
@@ -264,5 +278,15 @@ static inline int adf_init_pf_wq(void)
static inline void adf_exit_pf_wq(void)
{
}
+
+static inline int adf_init_vf_wq(void)
+{
+ return 0;
+}
+
+static inline void adf_exit_vf_wq(void)
+{
+}
+
#endif
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 3c3f948290ca..abc7a7f64d64 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -270,26 +270,33 @@ static int adf_ctl_is_device_in_use(int id)
return 0;
}
-static int adf_ctl_stop_devices(uint32_t id)
+static void adf_ctl_stop_devices(uint32_t id)
{
struct adf_accel_dev *accel_dev;
- int ret = 0;
- list_for_each_entry_reverse(accel_dev, adf_devmgr_get_head(), list) {
+ list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
if (!adf_dev_started(accel_dev))
continue;
- if (adf_dev_stop(accel_dev)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to stop qat_dev%d\n", id);
- ret = -EFAULT;
- } else {
- adf_dev_shutdown(accel_dev);
- }
+ /* First stop all VFs */
+ if (!accel_dev->is_vf)
+ continue;
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
+ }
+ }
+
+ list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
+ if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+ if (!adf_dev_started(accel_dev))
+ continue;
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
}
}
- return ret;
}
static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
@@ -318,9 +325,8 @@ static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
ctl_data->device_id);
- ret = adf_ctl_stop_devices(ctl_data->device_id);
- if (ret)
- pr_err("QAT: failed to stop device.\n");
+ adf_ctl_stop_devices(ctl_data->device_id);
+
out:
kfree(ctl_data);
return ret;
@@ -465,12 +471,17 @@ static int __init adf_register_ctl_device_driver(void)
if (adf_init_pf_wq())
goto err_pf_wq;
+ if (adf_init_vf_wq())
+ goto err_vf_wq;
+
if (qat_crypto_register())
goto err_crypto_register;
return 0;
err_crypto_register:
+ adf_exit_vf_wq();
+err_vf_wq:
adf_exit_pf_wq();
err_pf_wq:
adf_exit_aer();
@@ -485,6 +496,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
{
adf_chr_drv_destroy();
adf_exit_aer();
+ adf_exit_vf_wq();
adf_exit_pf_wq();
qat_crypto_unregister();
adf_clean_vf_map(false);
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index ef5575e4a215..888c6675e7e5 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -236,9 +236,9 @@ EXPORT_SYMBOL_GPL(adf_dev_start);
 * is shutting down.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code otherwise.
+ * Return: void
*/
-int adf_dev_stop(struct adf_accel_dev *accel_dev)
+void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
struct service_hndl *service;
struct list_head *list_itr;
@@ -246,9 +246,9 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
int ret;
if (!adf_dev_started(accel_dev) &&
- !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
- return 0;
- }
+ !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
+ return;
+
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
@@ -279,8 +279,6 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
else
clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
}
-
- return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_stop);
@@ -329,6 +327,8 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
clear_bit(accel_dev->accel_id, &service->init_status);
}
+ hw_data->disable_iov(accel_dev);
+
if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
hw_data->free_irq(accel_dev);
clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
@@ -344,7 +344,6 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
if (hw_data->exit_admin_comms)
hw_data->exit_admin_comms(accel_dev);
- hw_data->disable_iov(accel_dev);
adf_cleanup_etr_data(accel_dev);
adf_dev_restore(accel_dev);
}
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index b81f79acc4ea..06d49017a52b 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -302,7 +302,7 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
}
/**
- * adf_vf_isr_resource_free() - Free IRQ for acceleration device
+ * adf_isr_resource_free() - Free IRQ for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function frees interrupts for acceleration device.
@@ -317,7 +317,7 @@ void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
EXPORT_SYMBOL_GPL(adf_isr_resource_free);
/**
- * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function allocates interrupts for acceleration device.
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 38a0415e767d..4a526e2f1d7f 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -249,13 +249,7 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
return -EBUSY;
}
- if (adf_dev_stop(accel_dev)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to stop qat_dev%d\n",
- accel_dev->accel_id);
- return -EFAULT;
- }
-
+ adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
}
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
new file mode 100644
index 000000000000..cd5f37dffe8a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -0,0 +1,92 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+/**
+ * adf_vf2pf_init() - send init msg to PF
+ * @accel_dev: Pointer to acceleration VF device.
+ *
+ * Function sends an init message from the VF to the PF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+ u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+ (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
+
+ if (adf_iov_putmsg(accel_dev, msg, 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Init event to PF\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_init);
+
+/**
+ * adf_vf2pf_shutdown() - send shutdown msg to PF
+ * @accel_dev: Pointer to acceleration VF device.
+ *
+ * Function sends a shutdown message from the VF to the PF.
+ *
+ * Return: void
+ */
+void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+ u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+ (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
+
+ if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
+ if (adf_iov_putmsg(accel_dev, msg, 0))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Shutdown event to PF\n");
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
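
A VF driver is expected to pair these two helpers around its bring-up and
teardown paths. A minimal sketch of that wiring follows; the callback names
(my_vf_start/my_vf_stop) are illustrative and are not the exact
dh895xccvf hook assignments.

    /* Sketch only: how a VF hw_data implementation might use the new
     * common helpers.  adf_vf2pf_init() sets ADF_STATUS_PF_RUNNING on
     * success; adf_vf2pf_shutdown() only messages the PF if that bit
     * is still set.
     */
    #include "adf_accel_devices.h"
    #include "adf_common_drv.h"

    static int my_vf_start(struct adf_accel_dev *accel_dev)
    {
    	/* Tell the PF this VF is coming up. */
    	return adf_vf2pf_init(accel_dev);
    }

    static void my_vf_stop(struct adf_accel_dev *accel_dev)
    {
    	/* Tell the PF this VF is going away, if it is still listening. */
    	adf_vf2pf_shutdown(accel_dev);
    }
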
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index 09427b3d4d55..aa689cabedb4 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -51,6 +51,7 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
@@ -64,6 +65,13 @@
#define ADF_VINTSOU_BUN BIT(0)
#define ADF_VINTSOU_PF2VF BIT(1)
+static struct workqueue_struct *adf_vf_stop_wq;
+
+struct adf_vf_stop_data {
+ struct adf_accel_dev *accel_dev;
+ struct work_struct work;
+};
+
static int adf_enable_msi(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
@@ -90,6 +98,20 @@ static void adf_disable_msi(struct adf_accel_dev *accel_dev)
pci_disable_msi(pdev);
}
+static void adf_dev_stop_async(struct work_struct *work)
+{
+ struct adf_vf_stop_data *stop_data =
+ container_of(work, struct adf_vf_stop_data, work);
+ struct adf_accel_dev *accel_dev = stop_data->accel_dev;
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
+
+ /* Re-enable PF2VF interrupts */
+ adf_enable_pf2vf_interrupts(accel_dev);
+ kfree(stop_data);
+}
+
static void adf_pf2vf_bh_handler(void *data)
{
struct adf_accel_dev *accel_dev = data;
@@ -107,11 +129,29 @@ static void adf_pf2vf_bh_handler(void *data)
goto err;
switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
- case ADF_PF2VF_MSGTYPE_RESTARTING:
+ case ADF_PF2VF_MSGTYPE_RESTARTING: {
+ struct adf_vf_stop_data *stop_data;
+
dev_dbg(&GET_DEV(accel_dev),
"Restarting msg received from PF 0x%x\n", msg);
- adf_dev_stop(accel_dev);
- break;
+
+ clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
+ stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
+ if (!stop_data) {
+ dev_err(&GET_DEV(accel_dev),
+ "Couldn't schedule stop for vf_%d\n",
+ accel_dev->accel_id);
+ return;
+ }
+ stop_data->accel_dev = accel_dev;
+ INIT_WORK(&stop_data->work, adf_dev_stop_async);
+ queue_work(adf_vf_stop_wq, &stop_data->work);
+ /* To ack, clear the PF2VFINT bit */
+ msg &= ~BIT(0);
+ ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
+ return;
+ }
case ADF_PF2VF_MSGTYPE_VERSION_RESP:
dev_dbg(&GET_DEV(accel_dev),
"Version resp received from PF 0x%x\n", msg);
@@ -278,3 +318,18 @@ err_out:
return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
+
+int __init adf_init_vf_wq(void)
+{
+ adf_vf_stop_wq = create_workqueue("adf_vf_stop_wq");
+
+ return !adf_vf_stop_wq ? -EFAULT : 0;
+}
+
+void adf_exit_vf_wq(void)
+{
+ if (adf_vf_stop_wq)
+ destroy_workqueue(adf_vf_stop_wq);
+
+ adf_vf_stop_wq = NULL;
+}
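
The RESTARTING handler above defers adf_dev_stop()/adf_dev_shutdown() to the
adf_vf_stop_wq workqueue rather than calling them from the PF2VF bottom half.
A stripped-down sketch of that defer-to-process-context pattern is shown
below; all names (my_dev, my_stop_wq, my_teardown) are illustrative, not
qat_common symbols.

    /* Heavy, possibly sleeping teardown is pushed out of atomic context
     * (tasklet/IRQ bottom half) into a workqueue.  my_stop_wq would be
     * created with create_workqueue() at module init and destroyed at
     * module exit, mirroring adf_init_vf_wq()/adf_exit_vf_wq().
     */
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_dev {
    	int id;
    };

    struct my_stop_work {
    	struct my_dev *dev;
    	struct work_struct work;
    };

    static struct workqueue_struct *my_stop_wq;

    static void my_teardown(struct my_dev *dev)
    {
    	pr_info("tearing down device %d\n", dev->id);	/* may sleep here */
    }

    static void my_stop_worker(struct work_struct *work)
    {
    	struct my_stop_work *sw =
    		container_of(work, struct my_stop_work, work);

    	my_teardown(sw->dev);
    	kfree(sw);
    }

    /* Called from a bottom half, i.e. atomic context: allocate with
     * GFP_ATOMIC and hand the rest off to the workqueue. */
    static void my_bh_handler(struct my_dev *dev)
    {
    	struct my_stop_work *sw = kzalloc(sizeof(*sw), GFP_ATOMIC);

    	if (!sw)
    		return;
    	sw->dev = dev;
    	INIT_WORK(&sw->work, my_stop_worker);
    	queue_work(my_stop_wq, &sw->work);
    }
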
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index e5c0727d4876..05f49d4f94b2 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -593,7 +593,7 @@ int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
ret = -ENOMEM;
ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
- if (!ctx->n)
+ if (!ctx->d)
goto err;
memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
@@ -711,7 +711,7 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
}
qat_crypto_put_instance(ctx->inst);
ctx->n = NULL;
- ctx->d = NULL;
+ ctx->e = NULL;
ctx->d = NULL;
}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index a8c4b92a7cbd..4d2de2838451 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -302,9 +302,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- if (adf_dev_stop(accel_dev))
- dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+ adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_disable_aer(accel_dev);
adf_cleanup_accel(accel_dev);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index dc04ab68d24d..a3b4dd8099a7 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -109,29 +109,6 @@ static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{
}
-static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (adf_iov_putmsg(accel_dev, msg, 0)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Init event to PF\n");
- return -EFAULT;
- }
- return 0;
-}
-
-static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (adf_iov_putmsg(accel_dev, msg, 0))
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Shutdown event to PF\n");
-}
-
void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &dh895xcciov_class;
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index f8cc4bf0a50c..60df98632fa2 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -238,6 +238,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
@@ -270,9 +272,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- if (adf_dev_stop(accel_dev))
- dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+ adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 5f161a9777e3..2b3a0cfe3331 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -11,65 +11,64 @@
*
*/
-#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/crypto.h>
-#include <linux/interrupt.h>
-#include <crypto/algapi.h>
-#include <crypto/aes.h>
#include <crypto/ctr.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
#define _SBF(s, v) ((v) << (s))
-#define _BIT(b) _SBF(b, 1)
/* Feed control registers */
#define SSS_REG_FCINTSTAT 0x0000
-#define SSS_FCINTSTAT_BRDMAINT _BIT(3)
-#define SSS_FCINTSTAT_BTDMAINT _BIT(2)
-#define SSS_FCINTSTAT_HRDMAINT _BIT(1)
-#define SSS_FCINTSTAT_PKDMAINT _BIT(0)
+#define SSS_FCINTSTAT_BRDMAINT BIT(3)
+#define SSS_FCINTSTAT_BTDMAINT BIT(2)
+#define SSS_FCINTSTAT_HRDMAINT BIT(1)
+#define SSS_FCINTSTAT_PKDMAINT BIT(0)
#define SSS_REG_FCINTENSET 0x0004
-#define SSS_FCINTENSET_BRDMAINTENSET _BIT(3)
-#define SSS_FCINTENSET_BTDMAINTENSET _BIT(2)
-#define SSS_FCINTENSET_HRDMAINTENSET _BIT(1)
-#define SSS_FCINTENSET_PKDMAINTENSET _BIT(0)
+#define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
+#define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
+#define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
+#define SSS_FCINTENSET_PKDMAINTENSET BIT(0)
#define SSS_REG_FCINTENCLR 0x0008
-#define SSS_FCINTENCLR_BRDMAINTENCLR _BIT(3)
-#define SSS_FCINTENCLR_BTDMAINTENCLR _BIT(2)
-#define SSS_FCINTENCLR_HRDMAINTENCLR _BIT(1)
-#define SSS_FCINTENCLR_PKDMAINTENCLR _BIT(0)
+#define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
+#define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
+#define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
+#define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)
#define SSS_REG_FCINTPEND 0x000C
-#define SSS_FCINTPEND_BRDMAINTP _BIT(3)
-#define SSS_FCINTPEND_BTDMAINTP _BIT(2)
-#define SSS_FCINTPEND_HRDMAINTP _BIT(1)
-#define SSS_FCINTPEND_PKDMAINTP _BIT(0)
+#define SSS_FCINTPEND_BRDMAINTP BIT(3)
+#define SSS_FCINTPEND_BTDMAINTP BIT(2)
+#define SSS_FCINTPEND_HRDMAINTP BIT(1)
+#define SSS_FCINTPEND_PKDMAINTP BIT(0)
#define SSS_REG_FCFIFOSTAT 0x0010
-#define SSS_FCFIFOSTAT_BRFIFOFUL _BIT(7)
-#define SSS_FCFIFOSTAT_BRFIFOEMP _BIT(6)
-#define SSS_FCFIFOSTAT_BTFIFOFUL _BIT(5)
-#define SSS_FCFIFOSTAT_BTFIFOEMP _BIT(4)
-#define SSS_FCFIFOSTAT_HRFIFOFUL _BIT(3)
-#define SSS_FCFIFOSTAT_HRFIFOEMP _BIT(2)
-#define SSS_FCFIFOSTAT_PKFIFOFUL _BIT(1)
-#define SSS_FCFIFOSTAT_PKFIFOEMP _BIT(0)
+#define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
+#define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
+#define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
+#define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
+#define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
+#define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
+#define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
+#define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)
#define SSS_REG_FCFIFOCTRL 0x0014
-#define SSS_FCFIFOCTRL_DESSEL _BIT(2)
+#define SSS_FCFIFOCTRL_DESSEL BIT(2)
#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
@@ -77,52 +76,52 @@
#define SSS_REG_FCBRDMAS 0x0020
#define SSS_REG_FCBRDMAL 0x0024
#define SSS_REG_FCBRDMAC 0x0028
-#define SSS_FCBRDMAC_BYTESWAP _BIT(1)
-#define SSS_FCBRDMAC_FLUSH _BIT(0)
+#define SSS_FCBRDMAC_BYTESWAP BIT(1)
+#define SSS_FCBRDMAC_FLUSH BIT(0)
#define SSS_REG_FCBTDMAS 0x0030
#define SSS_REG_FCBTDMAL 0x0034
#define SSS_REG_FCBTDMAC 0x0038
-#define SSS_FCBTDMAC_BYTESWAP _BIT(1)
-#define SSS_FCBTDMAC_FLUSH _BIT(0)
+#define SSS_FCBTDMAC_BYTESWAP BIT(1)
+#define SSS_FCBTDMAC_FLUSH BIT(0)
#define SSS_REG_FCHRDMAS 0x0040
#define SSS_REG_FCHRDMAL 0x0044
#define SSS_REG_FCHRDMAC 0x0048
-#define SSS_FCHRDMAC_BYTESWAP _BIT(1)
-#define SSS_FCHRDMAC_FLUSH _BIT(0)
+#define SSS_FCHRDMAC_BYTESWAP BIT(1)
+#define SSS_FCHRDMAC_FLUSH BIT(0)
#define SSS_REG_FCPKDMAS 0x0050
#define SSS_REG_FCPKDMAL 0x0054
#define SSS_REG_FCPKDMAC 0x0058
-#define SSS_FCPKDMAC_BYTESWAP _BIT(3)
-#define SSS_FCPKDMAC_DESCEND _BIT(2)
-#define SSS_FCPKDMAC_TRANSMIT _BIT(1)
-#define SSS_FCPKDMAC_FLUSH _BIT(0)
+#define SSS_FCPKDMAC_BYTESWAP BIT(3)
+#define SSS_FCPKDMAC_DESCEND BIT(2)
+#define SSS_FCPKDMAC_TRANSMIT BIT(1)
+#define SSS_FCPKDMAC_FLUSH BIT(0)
#define SSS_REG_FCPKDMAO 0x005C
/* AES registers */
#define SSS_REG_AES_CONTROL 0x00
-#define SSS_AES_BYTESWAP_DI _BIT(11)
-#define SSS_AES_BYTESWAP_DO _BIT(10)
-#define SSS_AES_BYTESWAP_IV _BIT(9)
-#define SSS_AES_BYTESWAP_CNT _BIT(8)
-#define SSS_AES_BYTESWAP_KEY _BIT(7)
-#define SSS_AES_KEY_CHANGE_MODE _BIT(6)
+#define SSS_AES_BYTESWAP_DI BIT(11)
+#define SSS_AES_BYTESWAP_DO BIT(10)
+#define SSS_AES_BYTESWAP_IV BIT(9)
+#define SSS_AES_BYTESWAP_CNT BIT(8)
+#define SSS_AES_BYTESWAP_KEY BIT(7)
+#define SSS_AES_KEY_CHANGE_MODE BIT(6)
#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
-#define SSS_AES_FIFO_MODE _BIT(3)
+#define SSS_AES_FIFO_MODE BIT(3)
#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
-#define SSS_AES_MODE_DECRYPT _BIT(0)
+#define SSS_AES_MODE_DECRYPT BIT(0)
#define SSS_REG_AES_STATUS 0x04
-#define SSS_AES_BUSY _BIT(2)
-#define SSS_AES_INPUT_READY _BIT(1)
-#define SSS_AES_OUTPUT_READY _BIT(0)
+#define SSS_AES_BUSY BIT(2)
+#define SSS_AES_INPUT_READY BIT(1)
+#define SSS_AES_OUTPUT_READY BIT(0)
#define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2))
#define SSS_REG_AES_OUT_DATA(s) (0x20 + (s << 2))
@@ -139,7 +138,7 @@
SSS_AES_REG(dev, reg))
/* HW engine modes */
-#define FLAGS_AES_DECRYPT _BIT(0)
+#define FLAGS_AES_DECRYPT BIT(0)
#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
#define FLAGS_AES_CBC _SBF(1, 0x01)
#define FLAGS_AES_CTR _SBF(1, 0x02)
@@ -149,7 +148,6 @@
/**
* struct samsung_aes_variant - platform specific SSS driver data
- * @has_hash_irq: true if SSS module uses hash interrupt, false otherwise
* @aes_offset: AES register offset from SSS module's base.
*
* Specifies platform specific configuration of SSS module.
@@ -157,7 +155,6 @@
* expansion of its usage.
*/
struct samsung_aes_variant {
- bool has_hash_irq;
unsigned int aes_offset;
};
@@ -178,7 +175,6 @@ struct s5p_aes_dev {
struct clk *clk;
void __iomem *ioaddr;
void __iomem *aes_ioaddr;
- int irq_hash;
int irq_fc;
struct ablkcipher_request *req;
@@ -186,6 +182,10 @@ struct s5p_aes_dev {
struct scatterlist *sg_src;
struct scatterlist *sg_dst;
+ /* In case of unaligned access: */
+ struct scatterlist *sg_src_cpy;
+ struct scatterlist *sg_dst_cpy;
+
struct tasklet_struct tasklet;
struct crypto_queue queue;
bool busy;
@@ -197,12 +197,10 @@ struct s5p_aes_dev {
static struct s5p_aes_dev *s5p_dev;
static const struct samsung_aes_variant s5p_aes_data = {
- .has_hash_irq = true,
.aes_offset = 0x4000,
};
static const struct samsung_aes_variant exynos_aes_data = {
- .has_hash_irq = false,
.aes_offset = 0x200,
};
@@ -245,8 +243,45 @@ static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}
+static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
+{
+ int len;
+
+ if (!*sg)
+ return;
+
+ len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+ free_pages((unsigned long)sg_virt(*sg), get_order(len));
+
+ kfree(*sg);
+ *sg = NULL;
+}
+
+static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
+ unsigned int nbytes, int out)
+{
+ struct scatter_walk walk;
+
+ if (!nbytes)
+ return;
+
+ scatterwalk_start(&walk, sg);
+ scatterwalk_copychunks(buf, &walk, nbytes, out);
+ scatterwalk_done(&walk, out, 0);
+}
+
static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
{
+ if (dev->sg_dst_cpy) {
+ dev_dbg(dev->dev,
+ "Copying %d bytes of output data back to original place\n",
+ dev->req->nbytes);
+ s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
+ dev->req->nbytes, 1);
+ }
+ s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
+ s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+
/* holding a lock outside */
dev->req->base.complete(&dev->req->base, err);
dev->busy = false;
@@ -262,15 +297,37 @@ static void s5p_unset_indata(struct s5p_aes_dev *dev)
dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
}
+static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
+ struct scatterlist **dst)
+{
+ void *pages;
+ int len;
+
+ *dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
+ if (!*dst)
+ return -ENOMEM;
+
+ len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+ pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
+ if (!pages) {
+ kfree(*dst);
+ *dst = NULL;
+ return -ENOMEM;
+ }
+
+ s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
+
+ sg_init_table(*dst, 1);
+ sg_set_buf(*dst, pages, len);
+
+ return 0;
+}
+
static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
int err;
- if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
- err = -EINVAL;
- goto exit;
- }
- if (!sg_dma_len(sg)) {
+ if (!sg->length) {
err = -EINVAL;
goto exit;
}
@@ -284,7 +341,7 @@ static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
dev->sg_dst = sg;
err = 0;
- exit:
+exit:
return err;
}
@@ -292,11 +349,7 @@ static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
int err;
- if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
- err = -EINVAL;
- goto exit;
- }
- if (!sg_dma_len(sg)) {
+ if (!sg->length) {
err = -EINVAL;
goto exit;
}
@@ -310,47 +363,59 @@ static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
dev->sg_src = sg;
err = 0;
- exit:
+exit:
return err;
}
-static void s5p_aes_tx(struct s5p_aes_dev *dev)
+/*
+ * Returns true if new transmitting (output) data is ready and its
+ * address+length have to be written to device (by calling
+ * s5p_set_dma_outdata()). False otherwise.
+ */
+static bool s5p_aes_tx(struct s5p_aes_dev *dev)
{
int err = 0;
+ bool ret = false;
s5p_unset_outdata(dev);
if (!sg_is_last(dev->sg_dst)) {
err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
- if (err) {
+ if (err)
s5p_aes_complete(dev, err);
- return;
- }
-
- s5p_set_dma_outdata(dev, dev->sg_dst);
+ else
+ ret = true;
} else {
s5p_aes_complete(dev, err);
dev->busy = true;
tasklet_schedule(&dev->tasklet);
}
+
+ return ret;
}
-static void s5p_aes_rx(struct s5p_aes_dev *dev)
+/*
+ * Returns true if new receiving (input) data is ready and its
+ * address+length have to be written to device (by calling
+ * s5p_set_dma_indata()). False otherwise.
+ */
+static bool s5p_aes_rx(struct s5p_aes_dev *dev)
{
int err;
+ bool ret = false;
s5p_unset_indata(dev);
if (!sg_is_last(dev->sg_src)) {
err = s5p_set_indata(dev, sg_next(dev->sg_src));
- if (err) {
+ if (err)
s5p_aes_complete(dev, err);
- return;
- }
-
- s5p_set_dma_indata(dev, dev->sg_src);
+ else
+ ret = true;
}
+
+ return ret;
}
static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
@@ -359,18 +424,29 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
uint32_t status;
unsigned long flags;
+ bool set_dma_tx = false;
+ bool set_dma_rx = false;
spin_lock_irqsave(&dev->lock, flags);
- if (irq == dev->irq_fc) {
- status = SSS_READ(dev, FCINTSTAT);
- if (status & SSS_FCINTSTAT_BRDMAINT)
- s5p_aes_rx(dev);
- if (status & SSS_FCINTSTAT_BTDMAINT)
- s5p_aes_tx(dev);
-
- SSS_WRITE(dev, FCINTPEND, status);
- }
+ status = SSS_READ(dev, FCINTSTAT);
+ if (status & SSS_FCINTSTAT_BRDMAINT)
+ set_dma_rx = s5p_aes_rx(dev);
+ if (status & SSS_FCINTSTAT_BTDMAINT)
+ set_dma_tx = s5p_aes_tx(dev);
+
+ SSS_WRITE(dev, FCINTPEND, status);
+
+ /*
+ * Writing the length of a DMA block (either receiving or transmitting)
+ * starts the operation immediately, so this must be done at the end
+ * (and after clearing the pending interrupts, so that no interrupt is
+ * missed).
+ */
+ if (set_dma_tx)
+ s5p_set_dma_outdata(dev, dev->sg_dst);
+ if (set_dma_rx)
+ s5p_set_dma_indata(dev, dev->sg_src);
spin_unlock_irqrestore(&dev->lock, flags);
@@ -395,6 +471,71 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
memcpy_toio(keystart, key, keylen);
}
+static bool s5p_is_sg_aligned(struct scatterlist *sg)
+{
+ while (sg) {
+ if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+ return false;
+ sg = sg_next(sg);
+ }
+
+ return true;
+}
+
+static int s5p_set_indata_start(struct s5p_aes_dev *dev,
+ struct ablkcipher_request *req)
+{
+ struct scatterlist *sg;
+ int err;
+
+ dev->sg_src_cpy = NULL;
+ sg = req->src;
+ if (!s5p_is_sg_aligned(sg)) {
+ dev_dbg(dev->dev,
+ "At least one unaligned source scatter list, making a copy\n");
+ err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
+ if (err)
+ return err;
+
+ sg = dev->sg_src_cpy;
+ }
+
+ err = s5p_set_indata(dev, sg);
+ if (err) {
+ s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
+ return err;
+ }
+
+ return 0;
+}
+
+static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
+ struct ablkcipher_request *req)
+{
+ struct scatterlist *sg;
+ int err;
+
+ dev->sg_dst_cpy = NULL;
+ sg = req->dst;
+ if (!s5p_is_sg_aligned(sg)) {
+ dev_dbg(dev->dev,
+ "At least one unaligned dest scatter list, making a copy\n");
+ err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
+ if (err)
+ return err;
+
+ sg = dev->sg_dst_cpy;
+ }
+
+ err = s5p_set_outdata(dev, sg);
+ if (err) {
+ s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+ return err;
+ }
+
+ return 0;
+}
+
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
struct ablkcipher_request *req = dev->req;
@@ -431,19 +572,19 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
SSS_WRITE(dev, FCFIFOCTRL, 0x00);
- err = s5p_set_indata(dev, req->src);
+ err = s5p_set_indata_start(dev, req);
if (err)
goto indata_error;
- err = s5p_set_outdata(dev, req->dst);
+ err = s5p_set_outdata_start(dev, req);
if (err)
goto outdata_error;
SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
- s5p_set_dma_indata(dev, req->src);
- s5p_set_dma_outdata(dev, req->dst);
+ s5p_set_dma_indata(dev, dev->sg_src);
+ s5p_set_dma_outdata(dev, dev->sg_dst);
SSS_WRITE(dev, FCINTENSET,
SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);
@@ -452,10 +593,10 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
return;
- outdata_error:
+outdata_error:
s5p_unset_indata(dev);
- indata_error:
+indata_error:
s5p_aes_complete(dev, err);
spin_unlock_irqrestore(&dev->lock, flags);
}
@@ -506,7 +647,7 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
tasklet_schedule(&dev->tasklet);
- exit:
+exit:
return err;
}
@@ -671,21 +812,6 @@ static int s5p_aes_probe(struct platform_device *pdev)
goto err_irq;
}
- if (variant->has_hash_irq) {
- pdata->irq_hash = platform_get_irq(pdev, 1);
- if (pdata->irq_hash < 0) {
- err = pdata->irq_hash;
- dev_warn(dev, "hash interrupt is not available.\n");
- goto err_irq;
- }
- err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
- IRQF_SHARED, pdev->name, pdev);
- if (err < 0) {
- dev_warn(dev, "hash interrupt is not available.\n");
- goto err_irq;
- }
- }
-
pdata->busy = false;
pdata->variant = variant;
pdata->dev = dev;
@@ -705,7 +831,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
return 0;
- err_algs:
+err_algs:
dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);
for (j = 0; j < i; j++)
@@ -713,7 +839,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
tasklet_kill(&pdata->tasklet);
- err_irq:
+err_irq:
clk_disable_unprepare(pdata->clk);
s5p_dev = NULL;
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 7be3fbcd8d78..3830d7c4e138 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -35,6 +35,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
unsigned int todo;
struct sg_mapping_iter mi, mo;
unsigned int oi, oo; /* offset for in and out */
+ unsigned long flags;
if (areq->nbytes == 0)
return 0;
@@ -49,7 +50,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
return -EINVAL;
}
- spin_lock_bh(&ss->slock);
+ spin_lock_irqsave(&ss->slock, flags);
for (i = 0; i < op->keylen; i += 4)
writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
@@ -117,7 +118,7 @@ release_ss:
sg_miter_stop(&mi);
sg_miter_stop(&mo);
writel(0, ss->base + SS_CTL);
- spin_unlock_bh(&ss->slock);
+ spin_unlock_irqrestore(&ss->slock, flags);
return err;
}
@@ -149,6 +150,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
unsigned int ob = 0; /* offset in buf */
unsigned int obo = 0; /* offset in bufo*/
unsigned int obl = 0; /* length of data in bufo */
+ unsigned long flags;
if (areq->nbytes == 0)
return 0;
@@ -181,7 +183,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
if (no_chunk == 1)
return sun4i_ss_opti_poll(areq);
- spin_lock_bh(&ss->slock);
+ spin_lock_irqsave(&ss->slock, flags);
for (i = 0; i < op->keylen; i += 4)
writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
@@ -307,7 +309,7 @@ release_ss:
sg_miter_stop(&mi);
sg_miter_stop(&mo);
writel(0, ss->base + SS_CTL);
- spin_unlock_bh(&ss->slock);
+ spin_unlock_irqrestore(&ss->slock, flags);
return err;
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index aae05547b924..b7ee8d30147d 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -835,6 +835,16 @@ struct talitos_ahash_req_ctx {
struct scatterlist *psrc;
};
+struct talitos_export_state {
+ u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
+ u8 buf[HASH_MAX_BLOCK_SIZE];
+ unsigned int swinit;
+ unsigned int first;
+ unsigned int last;
+ unsigned int to_hash_later;
+ unsigned int nbuf;
+};
+
static int aead_setkey(struct crypto_aead *authenc,
const u8 *key, unsigned int keylen)
{
@@ -1981,6 +1991,46 @@ static int ahash_digest(struct ahash_request *areq)
return ahash_process_req(areq, areq->nbytes);
}
+static int ahash_export(struct ahash_request *areq, void *out)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct talitos_export_state *export = out;
+
+ memcpy(export->hw_context, req_ctx->hw_context,
+ req_ctx->hw_context_size);
+ memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
+ export->swinit = req_ctx->swinit;
+ export->first = req_ctx->first;
+ export->last = req_ctx->last;
+ export->to_hash_later = req_ctx->to_hash_later;
+ export->nbuf = req_ctx->nbuf;
+
+ return 0;
+}
+
+static int ahash_import(struct ahash_request *areq, const void *in)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ const struct talitos_export_state *export = in;
+
+ memset(req_ctx, 0, sizeof(*req_ctx));
+ req_ctx->hw_context_size =
+ (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+ ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+ : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+ memcpy(req_ctx->hw_context, export->hw_context,
+ req_ctx->hw_context_size);
+ memcpy(req_ctx->buf, export->buf, export->nbuf);
+ req_ctx->swinit = export->swinit;
+ req_ctx->first = export->first;
+ req_ctx->last = export->last;
+ req_ctx->to_hash_later = export->to_hash_later;
+ req_ctx->nbuf = export->nbuf;
+
+ return 0;
+}
+
struct keyhash_result {
struct completion completion;
int err;
@@ -2458,6 +2508,7 @@ static struct talitos_alg_template driver_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct talitos_export_state),
.halg.base = {
.cra_name = "md5",
.cra_driver_name = "md5-talitos",
@@ -2473,6 +2524,7 @@ static struct talitos_alg_template driver_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct talitos_export_state),
.halg.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-talitos",
@@ -2488,6 +2540,7 @@ static struct talitos_alg_template driver_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct talitos_export_state),
.halg.base = {
.cra_name = "sha224",
.cra_driver_name = "sha224-talitos",
@@ -2503,6 +2556,7 @@ static struct talitos_alg_template driver_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct talitos_export_state),
.halg.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-talitos",
@@ -2518,6 +2572,7 @@ static struct talitos_alg_template driver_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct talitos_export_state),
.halg.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-talitos",
@@ -2533,6 +2588,7 @@ static struct talitos_alg_template driver_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct talitos_export_state),
.halg.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-talitos",
@@ -2548,6 +2604,7 @@ static struct talitos_alg_template driver_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct talitos_export_state),
.halg.base = {
.cra_name = "hmac(md5)",
.cra_driver_name = "hmac-md5-talitos",
@@ -2563,6 +2620,7 @@ static struct talitos_alg_template driver_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct talitos_export_state),
.halg.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "hmac-sha1-talitos",
@@ -2578,6 +2636,7 @@ static struct talitos_alg_template driver_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct talitos_export_state),
.halg.base = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "hmac-sha224-talitos",
@@ -2593,6 +2652,7 @@ static struct talitos_alg_template driver_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct talitos_export_state),
.halg.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "hmac-sha256-talitos",
@@ -2608,6 +2668,7 @@ static struct talitos_alg_template driver_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct talitos_export_state),
.halg.base = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "hmac-sha384-talitos",
@@ -2623,6 +2684,7 @@ static struct talitos_alg_template driver_algs[] = {
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct talitos_export_state),
.halg.base = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "hmac-sha512-talitos",
@@ -2814,6 +2876,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
t_alg->algt.alg.hash.finup = ahash_finup;
t_alg->algt.alg.hash.digest = ahash_digest;
t_alg->algt.alg.hash.setkey = ahash_setkey;
+ t_alg->algt.alg.hash.import = ahash_import;
+ t_alg->algt.alg.hash.export = ahash_export;
if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
!strncmp(alg->cra_name, "hmac", 4)) {
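
With .statesize and the export/import callbacks in place, callers can
checkpoint and resume a partial hash through the generic ahash API. The sketch
below shows the caller-side pattern, independent of talitos internals; for
brevity it assumes the request completes synchronously (handling of
-EINPROGRESS/-EBUSY via a completion is omitted), which is an assumption and
not a property of the driver.

    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>
    #include <crypto/hash.h>

    /* Hash part1 then part2 into digest (SHA256_DIGEST_SIZE bytes),
     * exporting and re-importing the intermediate state in between. */
    static int sha256_in_two_steps(const u8 *part1, unsigned int len1,
    			       const u8 *part2, unsigned int len2,
    			       u8 *digest)
    {
    	struct crypto_ahash *tfm;
    	struct ahash_request *req;
    	struct scatterlist sg;
    	void *state;
    	int ret = -ENOMEM;

    	tfm = crypto_alloc_ahash("sha256", 0, 0);
    	if (IS_ERR(tfm))
    		return PTR_ERR(tfm);

    	req = ahash_request_alloc(tfm, GFP_KERNEL);
    	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
    	if (!req || !state)
    		goto out;

    	ahash_request_set_callback(req, 0, NULL, NULL);

    	sg_init_one(&sg, part1, len1);
    	ahash_request_set_crypt(req, &sg, digest, len1);
    	ret = crypto_ahash_init(req) ?: crypto_ahash_update(req);
    	if (ret)
    		goto out;

    	/* Checkpoint the partial hash, then resume it and finish with
    	 * the second chunk. */
    	ret = crypto_ahash_export(req, state);
    	if (ret)
    		goto out;
    	ret = crypto_ahash_import(req, state);
    	if (ret)
    		goto out;

    	sg_init_one(&sg, part2, len2);
    	ahash_request_set_crypt(req, &sg, digest, len2);
    	ret = crypto_ahash_update(req) ?: crypto_ahash_final(req);
    out:
    	kfree(state);
    	ahash_request_free(req);
    	crypto_free_ahash(tfm);
    	return ret;
    }
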
diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl
index b9997335f193..9f4994cabcc7 100644
--- a/drivers/crypto/vmx/ppc-xlate.pl
+++ b/drivers/crypto/vmx/ppc-xlate.pl
@@ -139,6 +139,26 @@ my $vmr = sub {
" vor $vx,$vy,$vy";
};
+# Some ABIs specify vrsave, special-purpose register #256, as reserved
+# for system use.
+my $no_vrsave = ($flavour =~ /aix|linux64le/);
+my $mtspr = sub {
+ my ($f,$idx,$ra) = @_;
+ if ($idx == 256 && $no_vrsave) {
+ " or $ra,$ra,$ra";
+ } else {
+ " mtspr $idx,$ra";
+ }
+};
+my $mfspr = sub {
+ my ($f,$rd,$idx) = @_;
+ if ($idx == 256 && $no_vrsave) {
+ " li $rd,-1";
+ } else {
+ " mfspr $rd,$idx";
+ }
+};
+
# PowerISA 2.06 stuff
sub vsxmem_op {
my ($f, $vrt, $ra, $rb, $op) = @_;
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 4de78c552251..78dac0e9da11 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -64,30 +64,32 @@ config DEVFREQ_GOV_USERSPACE
Otherwise, the governor does not change the frequency
given at the initialization.
+config DEVFREQ_GOV_PASSIVE
+ tristate "Passive"
+ help
+ Sets the frequency based on the frequency of its parent devfreq
+ device. This governor does not change the frequency by itself
+ through sysfs entries. The passive governor recommends that the
+ devfreq device use the OPP table to get the frequency/voltage.
+
comment "DEVFREQ Drivers"
-config ARM_EXYNOS4_BUS_DEVFREQ
- bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
- depends on (CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM
+config ARM_EXYNOS_BUS_DEVFREQ
+ bool "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
+ depends on ARCH_EXYNOS
select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select DEVFREQ_GOV_PASSIVE
+ select DEVFREQ_EVENT_EXYNOS_PPMU
+ select PM_DEVFREQ_EVENT
select PM_OPP
help
- This adds the DEVFREQ driver for Exynos4210 memory bus (vdd_int)
- and Exynos4212/4412 memory interface and bus (vdd_mif + vdd_int).
- It reads PPMU counters of memory controllers and adjusts
- the operating frequencies and voltages with OPP support.
+ This adds the common DEVFREQ driver for the Exynos memory bus. The
+ Exynos memory bus has one or more groups of memory buses (e.g., the
+ MIF and INT blocks), and each group can contain many memory bus blocks.
+ The driver reads the PPMU counters of the memory controllers through a
+ DEVFREQ-event device and adjusts the operating frequencies and voltages
+ with OPP support.
This does not yet operate with optimal voltages.
-config ARM_EXYNOS5_BUS_DEVFREQ
- tristate "ARM Exynos5250 Bus DEVFREQ Driver"
- depends on SOC_EXYNOS5250
- select DEVFREQ_GOV_SIMPLE_ONDEMAND
- select PM_OPP
- help
- This adds the DEVFREQ driver for Exynos5250 bus interface (vdd_int).
- It reads PPMU counters of memory controllers and adjusts the
- operating frequencies and voltages with OPP support.
-
config ARM_TEGRA_DEVFREQ
tristate "Tegra DEVFREQ Driver"
depends on ARCH_TEGRA_124_SOC
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 5134f9ee983d..09f11d9d40d5 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -4,10 +4,10 @@ obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
+obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
# DEVFREQ Drivers
-obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos/
-obj-$(CONFIG_ARM_EXYNOS5_BUS_DEVFREQ) += exynos/
+obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra-devfreq.o
# DEVFREQ Event Drivers
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 38bf144ca147..39b048eda2ce 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -235,6 +235,11 @@ struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
mutex_lock(&devfreq_event_list_lock);
list_for_each_entry(edev, &devfreq_event_list, node) {
+ if (edev->dev.parent && edev->dev.parent->of_node == node)
+ goto out;
+ }
+
+ list_for_each_entry(edev, &devfreq_event_list, node) {
if (!strcmp(edev->desc->name, node->name))
goto out;
}
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 984c5e9e7bdd..1d6c803804d5 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
+#include <linux/of.h>
#include "governor.h"
static struct class *devfreq_class;
@@ -188,6 +189,29 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
return ERR_PTR(-ENODEV);
}
+static int devfreq_notify_transition(struct devfreq *devfreq,
+ struct devfreq_freqs *freqs, unsigned int state)
+{
+ if (!devfreq)
+ return -EINVAL;
+
+ switch (state) {
+ case DEVFREQ_PRECHANGE:
+ srcu_notifier_call_chain(&devfreq->transition_notifier_list,
+ DEVFREQ_PRECHANGE, freqs);
+ break;
+
+ case DEVFREQ_POSTCHANGE:
+ srcu_notifier_call_chain(&devfreq->transition_notifier_list,
+ DEVFREQ_POSTCHANGE, freqs);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* Load monitoring helper functions for governors use */
/**
@@ -199,7 +223,8 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
*/
int update_devfreq(struct devfreq *devfreq)
{
- unsigned long freq;
+ struct devfreq_freqs freqs;
+ unsigned long freq, cur_freq;
int err = 0;
u32 flags = 0;
@@ -233,10 +258,22 @@ int update_devfreq(struct devfreq *devfreq)
flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
}
+ if (devfreq->profile->get_cur_freq)
+ devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
+ else
+ cur_freq = devfreq->previous_freq;
+
+ freqs.old = cur_freq;
+ freqs.new = freq;
+ devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
+
err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
if (err)
return err;
+ freqs.new = freq;
+ devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
+
if (devfreq->profile->freq_table)
if (devfreq_update_status(devfreq, freq))
dev_err(&devfreq->dev,
@@ -541,6 +578,8 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_out;
}
+ srcu_init_notifier_head(&devfreq->transition_notifier_list);
+
mutex_unlock(&devfreq->lock);
mutex_lock(&devfreq_list_lock);
@@ -639,6 +678,49 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
}
EXPORT_SYMBOL(devm_devfreq_add_device);
+#ifdef CONFIG_OF
+/**
+ * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
+ * @dev: instance of the given device
+ * @index: index into the list of devfreq devices
+ *
+ * Return: the instance of the devfreq device
+ */
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
+{
+ struct device_node *node;
+ struct devfreq *devfreq;
+
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ if (!dev->of_node)
+ return ERR_PTR(-EINVAL);
+
+ node = of_parse_phandle(dev->of_node, "devfreq", index);
+ if (!node)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&devfreq_list_lock);
+ list_for_each_entry(devfreq, &devfreq_list, node) {
+ if (devfreq->dev.parent
+ && devfreq->dev.parent->of_node == node) {
+ mutex_unlock(&devfreq_list_lock);
+ return devfreq;
+ }
+ }
+ mutex_unlock(&devfreq_list_lock);
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+#else
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
+
/**
* devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
* @dev: the device to add devfreq feature.
@@ -1266,6 +1348,129 @@ void devm_devfreq_unregister_opp_notifier(struct device *dev,
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
+/**
+ * devfreq_register_notifier() - Register a driver with devfreq
+ * @devfreq: The devfreq object.
+ * @nb: The notifier block to register.
+ * @list: DEVFREQ_TRANSITION_NOTIFIER.
+ */
+int devfreq_register_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+ int ret = 0;
+
+ if (!devfreq)
+ return -EINVAL;
+
+ switch (list) {
+ case DEVFREQ_TRANSITION_NOTIFIER:
+ ret = srcu_notifier_chain_register(
+ &devfreq->transition_notifier_list, nb);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(devfreq_register_notifier);
+
+/**
+ * devfreq_unregister_notifier() - Unregister a driver with devfreq
+ * @devfreq: The devfreq object.
+ * @nb: The notifier block to be unregistered.
+ * @list: DEVFREQ_TRANSITION_NOTIFIER.
+ */
+int devfreq_unregister_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+ int ret = 0;
+
+ if (!devfreq)
+ return -EINVAL;
+
+ switch (list) {
+ case DEVFREQ_TRANSITION_NOTIFIER:
+ ret = srcu_notifier_chain_unregister(
+ &devfreq->transition_notifier_list, nb);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(devfreq_unregister_notifier);
+
+struct devfreq_notifier_devres {
+ struct devfreq *devfreq;
+ struct notifier_block *nb;
+ unsigned int list;
+};
+
+static void devm_devfreq_notifier_release(struct device *dev, void *res)
+{
+ struct devfreq_notifier_devres *this = res;
+
+ devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
+}
+
+/**
+ * devm_devfreq_register_notifier()
+ *	- Resource-managed devfreq_register_notifier()
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @devfreq: The devfreq object.
+ * @nb: The notifier block to be registered.
+ * @list: DEVFREQ_TRANSITION_NOTIFIER.
+ */
+int devm_devfreq_register_notifier(struct device *dev,
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+ struct devfreq_notifier_devres *ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = devfreq_register_notifier(devfreq, nb, list);
+ if (ret) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ ptr->devfreq = devfreq;
+ ptr->nb = nb;
+ ptr->list = list;
+ devres_add(dev, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL(devm_devfreq_register_notifier);
+
+/**
+ * devm_devfreq_unregister_notifier()
+ *	- Resource-managed devfreq_unregister_notifier()
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @devfreq: The devfreq object.
+ * @nb: The notifier block to be unregistered.
+ * @list: DEVFREQ_TRANSITION_NOTIFIER.
+ */
+void devm_devfreq_unregister_notifier(struct device *dev,
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+ WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
+ devm_devfreq_dev_match, devfreq));
+}
+EXPORT_SYMBOL(devm_devfreq_unregister_notifier);
+
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");
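
A consumer of the new transition notifications would typically register
through the devres helper and inspect struct devfreq_freqs in its callback.
The sketch below assumes the matching devfreq.h additions from this series
(DEVFREQ_TRANSITION_NOTIFIER, DEVFREQ_PRECHANGE/POSTCHANGE, struct
devfreq_freqs); my_driver_probe() and my_notifier_call() are hypothetical.

    #include <linux/kernel.h>
    #include <linux/device.h>
    #include <linux/devfreq.h>
    #include <linux/notifier.h>

    /* Called around every frequency change of the watched devfreq device. */
    static int my_notifier_call(struct notifier_block *nb, unsigned long event,
    			    void *data)
    {
    	struct devfreq_freqs *freqs = data;

    	if (event == DEVFREQ_PRECHANGE)
    		pr_debug("about to change: %lu -> %lu\n",
    			 freqs->old, freqs->new);
    	else if (event == DEVFREQ_POSTCHANGE)
    		pr_debug("changed: now running at %lu\n", freqs->new);

    	return NOTIFY_OK;
    }

    static struct notifier_block my_nb = {
    	.notifier_call = my_notifier_call,
    };

    /* The devfreq handle would normally come from devfreq_add_device() or
     * devfreq_get_devfreq_by_phandle(); the registration is undone
     * automatically on driver detach via devres. */
    static int my_driver_probe(struct device *dev, struct devfreq *parent)
    {
    	return devm_devfreq_register_notifier(dev, parent, &my_nb,
    					      DEVFREQ_TRANSITION_NOTIFIER);
    }
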
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
index a11720affc31..1e8b4f469f38 100644
--- a/drivers/devfreq/event/Kconfig
+++ b/drivers/devfreq/event/Kconfig
@@ -13,6 +13,14 @@ menuconfig PM_DEVFREQ_EVENT
if PM_DEVFREQ_EVENT
+config DEVFREQ_EVENT_EXYNOS_NOCP
+ bool "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
+ depends on ARCH_EXYNOS
+ select PM_OPP
+ help
+ This adds the devfreq-event driver for the Exynos SoC. It provides NoC
+ (Network on Chip) Probe counters to measure the bandwidth of the AXI bus.
+
config DEVFREQ_EVENT_EXYNOS_PPMU
bool "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
depends on ARCH_EXYNOS
diff --git a/drivers/devfreq/event/Makefile b/drivers/devfreq/event/Makefile
index be146ead79cf..3d6afd352253 100644
--- a/drivers/devfreq/event/Makefile
+++ b/drivers/devfreq/event/Makefile
@@ -1,2 +1,4 @@
# Exynos DEVFREQ Event Drivers
+
+obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_NOCP) += exynos-nocp.o
obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_PPMU) += exynos-ppmu.o
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
new file mode 100644
index 000000000000..6b6a5f310486
--- /dev/null
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -0,0 +1,304 @@
+/*
+ * exynos-nocp.c - EXYNOS NoC (Network On Chip) Probe support
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/devfreq-event.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "exynos-nocp.h"
+
+struct exynos_nocp {
+ struct devfreq_event_dev *edev;
+ struct devfreq_event_desc desc;
+
+ struct device *dev;
+
+ struct regmap *regmap;
+ struct clk *clk;
+};
+
+/*
+ * The devfreq-event ops structure for nocp probe.
+ */
+static int exynos_nocp_set_event(struct devfreq_event_dev *edev)
+{
+ struct exynos_nocp *nocp = devfreq_event_get_drvdata(edev);
+ int ret;
+
+ /* Disable NoC probe */
+ ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+ NOCP_MAIN_CTL_STATEN_MASK, 0);
+ if (ret < 0) {
+ dev_err(nocp->dev, "failed to disable the NoC probe device\n");
+ return ret;
+ }
+
+ /* Set a statistics dump period to 0 */
+ ret = regmap_write(nocp->regmap, NOCP_STAT_PERIOD, 0x0);
+ if (ret < 0)
+ goto out;
+
+ /* Set the IntEvent fields of *_SRC */
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_0_SRC,
+ NOCP_CNT_SRC_INTEVENT_MASK,
+ NOCP_CNT_SRC_INTEVENT_BYTE_MASK);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_1_SRC,
+ NOCP_CNT_SRC_INTEVENT_MASK,
+ NOCP_CNT_SRC_INTEVENT_CHAIN_MASK);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_2_SRC,
+ NOCP_CNT_SRC_INTEVENT_MASK,
+ NOCP_CNT_SRC_INTEVENT_CYCLE_MASK);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_3_SRC,
+ NOCP_CNT_SRC_INTEVENT_MASK,
+ NOCP_CNT_SRC_INTEVENT_CHAIN_MASK);
+ if (ret < 0)
+ goto out;
+
+
+ /* Set an alarm with a max/min value of 0 to generate StatALARM */
+ ret = regmap_write(nocp->regmap, NOCP_STAT_ALARM_MIN, 0x0);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_write(nocp->regmap, NOCP_STAT_ALARM_MAX, 0x0);
+ if (ret < 0)
+ goto out;
+
+ /* Set AlarmMode */
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_0_ALARM_MODE,
+ NOCP_CNT_ALARM_MODE_MASK,
+ NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_1_ALARM_MODE,
+ NOCP_CNT_ALARM_MODE_MASK,
+ NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_2_ALARM_MODE,
+ NOCP_CNT_ALARM_MODE_MASK,
+ NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_3_ALARM_MODE,
+ NOCP_CNT_ALARM_MODE_MASK,
+ NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+ if (ret < 0)
+ goto out;
+
+ /* Enable the measurements by setting AlarmEn and StatEn */
+ ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+ NOCP_MAIN_CTL_STATEN_MASK | NOCP_MAIN_CTL_ALARMEN_MASK,
+ NOCP_MAIN_CTL_STATEN_MASK | NOCP_MAIN_CTL_ALARMEN_MASK);
+ if (ret < 0)
+ goto out;
+
+ /* Set GlobalEN */
+ ret = regmap_update_bits(nocp->regmap, NOCP_CFG_CTL,
+ NOCP_CFG_CTL_GLOBALEN_MASK,
+ NOCP_CFG_CTL_GLOBALEN_MASK);
+ if (ret < 0)
+ goto out;
+
+ /* Enable NoC probe */
+ ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+ NOCP_MAIN_CTL_STATEN_MASK,
+ NOCP_MAIN_CTL_STATEN_MASK);
+ if (ret < 0)
+ goto out;
+
+ return 0;
+
+out:
+ /* Reset NoC probe */
+ if (regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+ NOCP_MAIN_CTL_STATEN_MASK, 0)) {
+ dev_err(nocp->dev, "Failed to reset NoC probe device\n");
+ }
+
+ return ret;
+}
+
+static int exynos_nocp_get_event(struct devfreq_event_dev *edev,
+ struct devfreq_event_data *edata)
+{
+ struct exynos_nocp *nocp = devfreq_event_get_drvdata(edev);
+ unsigned int counter[4];
+ int ret;
+
+ /* Read cycle count */
+ ret = regmap_read(nocp->regmap, NOCP_COUNTERS_0_VAL, &counter[0]);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_read(nocp->regmap, NOCP_COUNTERS_1_VAL, &counter[1]);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_read(nocp->regmap, NOCP_COUNTERS_2_VAL, &counter[2]);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_read(nocp->regmap, NOCP_COUNTERS_3_VAL, &counter[3]);
+ if (ret < 0)
+ goto out;
+
+ edata->load_count = ((counter[1] << 16) | counter[0]);
+ edata->total_count = ((counter[3] << 16) | counter[2]);
+
+ dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
+ edata->load_count, edata->total_count);
+
+ return 0;
+
+out:
+ edata->load_count = 0;
+ edata->total_count = 0;
+
+ dev_err(nocp->dev, "Failed to read the counter of NoC probe device\n");
+
+ return ret;
+}
+
+static const struct devfreq_event_ops exynos_nocp_ops = {
+ .set_event = exynos_nocp_set_event,
+ .get_event = exynos_nocp_get_event,
+};
+
+static const struct of_device_id exynos_nocp_id_match[] = {
+ { .compatible = "samsung,exynos5420-nocp", },
+ { /* sentinel */ },
+};
+
+static struct regmap_config exynos_nocp_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = NOCP_COUNTERS_3_VAL,
+};
+
+static int exynos_nocp_parse_dt(struct platform_device *pdev,
+ struct exynos_nocp *nocp)
+{
+ struct device *dev = nocp->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ void __iomem *base;
+
+ if (!np) {
+ dev_err(dev, "failed to find devicetree node\n");
+ return -EINVAL;
+ }
+
+ nocp->clk = devm_clk_get(dev, "nocp");
+ if (IS_ERR(nocp->clk))
+ nocp->clk = NULL;
+
+ /* Map the memory-mapped IO used to control the nocp registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ exynos_nocp_regmap_config.max_register = resource_size(res) - 4;
+
+ nocp->regmap = devm_regmap_init_mmio(dev, base,
+ &exynos_nocp_regmap_config);
+ if (IS_ERR(nocp->regmap)) {
+ dev_err(dev, "failed to initialize regmap\n");
+ return PTR_ERR(nocp->regmap);
+ }
+
+ return 0;
+}
+
+static int exynos_nocp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct exynos_nocp *nocp;
+ int ret;
+
+ nocp = devm_kzalloc(&pdev->dev, sizeof(*nocp), GFP_KERNEL);
+ if (!nocp)
+ return -ENOMEM;
+
+ nocp->dev = &pdev->dev;
+
+ /* Parse dt data to get resource */
+ ret = exynos_nocp_parse_dt(pdev, nocp);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to parse devicetree for resource\n");
+ return ret;
+ }
+
+ /* Add devfreq-event device to measure the bandwidth of NoC */
+ nocp->desc.ops = &exynos_nocp_ops;
+ nocp->desc.driver_data = nocp;
+ nocp->desc.name = np->full_name;
+ nocp->edev = devm_devfreq_event_add_edev(&pdev->dev, &nocp->desc);
+ if (IS_ERR(nocp->edev)) {
+ dev_err(&pdev->dev,
+ "failed to add devfreq-event device\n");
+ return PTR_ERR(nocp->edev);
+ }
+ platform_set_drvdata(pdev, nocp);
+
+ clk_prepare_enable(nocp->clk);
+
+ pr_info("exynos-nocp: new NoC Probe device registered: %s\n",
+ dev_name(dev));
+
+ return 0;
+}
+
+static int exynos_nocp_remove(struct platform_device *pdev)
+{
+ struct exynos_nocp *nocp = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(nocp->clk);
+
+ return 0;
+}
+
+static struct platform_driver exynos_nocp_driver = {
+ .probe = exynos_nocp_probe,
+ .remove = exynos_nocp_remove,
+ .driver = {
+ .name = "exynos-nocp",
+ .of_match_table = exynos_nocp_id_match,
+ },
+};
+module_platform_driver(exynos_nocp_driver);
+
+MODULE_DESCRIPTION("Exynos NoC (Network on Chip) Probe driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/event/exynos-nocp.h b/drivers/devfreq/event/exynos-nocp.h
new file mode 100644
index 000000000000..28564db0edb8
--- /dev/null
+++ b/drivers/devfreq/event/exynos-nocp.h
@@ -0,0 +1,78 @@
+/*
+ * exynos-nocp.h - EXYNOS NoC (Network on Chip) Probe header file
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __EXYNOS_NOCP_H__
+#define __EXYNOS_NOCP_H__
+
+enum nocp_reg {
+ NOCP_ID_REVISION_ID = 0x04,
+ NOCP_MAIN_CTL = 0x08,
+ NOCP_CFG_CTL = 0x0C,
+
+ NOCP_STAT_PERIOD = 0x24,
+ NOCP_STAT_GO = 0x28,
+ NOCP_STAT_ALARM_MIN = 0x2C,
+ NOCP_STAT_ALARM_MAX = 0x30,
+ NOCP_STAT_ALARM_STATUS = 0x34,
+ NOCP_STAT_ALARM_CLR = 0x38,
+
+ NOCP_COUNTERS_0_SRC = 0x138,
+ NOCP_COUNTERS_0_ALARM_MODE = 0x13C,
+ NOCP_COUNTERS_0_VAL = 0x140,
+
+ NOCP_COUNTERS_1_SRC = 0x14C,
+ NOCP_COUNTERS_1_ALARM_MODE = 0x150,
+ NOCP_COUNTERS_1_VAL = 0x154,
+
+ NOCP_COUNTERS_2_SRC = 0x160,
+ NOCP_COUNTERS_2_ALARM_MODE = 0x164,
+ NOCP_COUNTERS_2_VAL = 0x168,
+
+ NOCP_COUNTERS_3_SRC = 0x174,
+ NOCP_COUNTERS_3_ALARM_MODE = 0x178,
+ NOCP_COUNTERS_3_VAL = 0x17C,
+};
+
+/* NOCP_MAIN_CTL register */
+#define NOCP_MAIN_CTL_ERREN_MASK BIT(0)
+#define NOCP_MAIN_CTL_TRACEEN_MASK BIT(1)
+#define NOCP_MAIN_CTL_PAYLOADEN_MASK BIT(2)
+#define NOCP_MAIN_CTL_STATEN_MASK BIT(3)
+#define NOCP_MAIN_CTL_ALARMEN_MASK BIT(4)
+#define NOCP_MAIN_CTL_STATCONDDUMP_MASK BIT(5)
+#define NOCP_MAIN_CTL_INTRUSIVEMODE_MASK BIT(6)
+
+/* NOCP_CFG_CTL register */
+#define NOCP_CFG_CTL_GLOBALEN_MASK BIT(0)
+#define NOCP_CFG_CTL_ACTIVE_MASK BIT(1)
+
+/* NOCP_COUNTERS_x_SRC register */
+#define NOCP_CNT_SRC_INTEVENT_SHIFT 0
+#define NOCP_CNT_SRC_INTEVENT_MASK (0x1F << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_OFF_MASK (0x0 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_CYCLE_MASK (0x1 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_IDLE_MASK (0x2 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_XFER_MASK (0x3 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_BUSY_MASK (0x4 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_WAIT_MASK (0x5 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_PKT_MASK (0x6 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_BYTE_MASK (0x8 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_CHAIN_MASK (0x10 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+
+/* NOCP_COUNTERS_x_ALARM_MODE register */
+#define NOCP_CNT_ALARM_MODE_SHIFT 0
+#define NOCP_CNT_ALARM_MODE_MASK (0x3 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_OFF_MASK (0x0 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_MIN_MASK (0x1 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_MAX_MASK (0x2 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_MIN_MAX_MASK (0x3 << NOCP_CNT_ALARM_MODE_SHIFT)
+
+#endif /* __EXYNOS_NOCP_H__ */
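For reference, selecting which internal event a counter tracks would combine the NOCP_COUNTERS_x_SRC field masks defined above with a regmap write. The helper below is a hedged illustration, not code from the patch; the function name is hypothetical and it assumes the regmap set up in exynos_nocp_parse_dt():

/*
 * Illustrative only: select the "byte" internal event for counter 0.
 * Assumes a valid nocp->regmap as initialized by the driver.
 */
static int nocp_count_bytes_on_counter0(struct exynos_nocp *nocp)
{
	return regmap_update_bits(nocp->regmap, NOCP_COUNTERS_0_SRC,
				  NOCP_CNT_SRC_INTEVENT_MASK,
				  NOCP_CNT_SRC_INTEVENT_BYTE_MASK);
}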
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
new file mode 100644
index 000000000000..2363d0a189b7
--- /dev/null
+++ b/drivers/devfreq/exynos-bus.c
@@ -0,0 +1,570 @@
+/*
+ * Generic Exynos Bus frequency driver with DEVFREQ Framework
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This driver supports the Exynos bus frequency feature using the
+ * DEVFREQ framework and is based on drivers/devfreq/exynos/exynos4_bus.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq-event.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#define DEFAULT_SATURATION_RATIO 40
+#define DEFAULT_VOLTAGE_TOLERANCE 2
+
+struct exynos_bus {
+ struct device *dev;
+
+ struct devfreq *devfreq;
+ struct devfreq_event_dev **edev;
+ unsigned int edev_count;
+ struct mutex lock;
+
+ struct dev_pm_opp *curr_opp;
+
+ struct regulator *regulator;
+ struct clk *clk;
+ unsigned int voltage_tolerance;
+ unsigned int ratio;
+};
+
+/*
+ * Control the devfreq-event devices to get the current state of the bus
+ */
+#define exynos_bus_ops_edev(ops) \
+static int exynos_bus_##ops(struct exynos_bus *bus) \
+{ \
+ int i, ret; \
+ \
+ for (i = 0; i < bus->edev_count; i++) { \
+ if (!bus->edev[i]) \
+ continue; \
+ ret = devfreq_event_##ops(bus->edev[i]); \
+ if (ret < 0) \
+ return ret; \
+ } \
+ \
+ return 0; \
+}
+exynos_bus_ops_edev(enable_edev);
+exynos_bus_ops_edev(disable_edev);
+exynos_bus_ops_edev(set_event);
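The exynos_bus_ops_edev() macro above stamps out three thin wrappers around the devfreq-event API. For clarity, the first instantiation expands to roughly the following (an illustrative expansion, not additional code in the patch):

/* Approximate expansion of exynos_bus_ops_edev(enable_edev) */
static int exynos_bus_enable_edev(struct exynos_bus *bus)
{
	int i, ret;

	for (i = 0; i < bus->edev_count; i++) {
		if (!bus->edev[i])
			continue;
		ret = devfreq_event_enable_edev(bus->edev[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}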
+
+static int exynos_bus_get_event(struct exynos_bus *bus,
+ struct devfreq_event_data *edata)
+{
+ struct devfreq_event_data event_data;
+ unsigned long load_count = 0, total_count = 0;
+ int i, ret = 0;
+
+ for (i = 0; i < bus->edev_count; i++) {
+ if (!bus->edev[i])
+ continue;
+
+ ret = devfreq_event_get_event(bus->edev[i], &event_data);
+ if (ret < 0)
+ return ret;
+
+ if (i == 0 || event_data.load_count > load_count) {
+ load_count = event_data.load_count;
+ total_count = event_data.total_count;
+ }
+ }
+
+ edata->load_count = load_count;
+ edata->total_count = total_count;
+
+ return ret;
+}
+
+/*
+ * Functions required by the devfreq simple-ondemand governor
+ */
+static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+ struct exynos_bus *bus = dev_get_drvdata(dev);
+ struct dev_pm_opp *new_opp;
+ unsigned long old_freq, new_freq, old_volt, new_volt, tol;
+ int ret = 0;
+
+ /* Get the new OPP instance corresponding to the new bus clock rate */
+ rcu_read_lock();
+ new_opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(new_opp)) {
+ dev_err(dev, "failed to get recommended opp instance\n");
+ rcu_read_unlock();
+ return PTR_ERR(new_opp);
+ }
+
+ new_freq = dev_pm_opp_get_freq(new_opp);
+ new_volt = dev_pm_opp_get_voltage(new_opp);
+ old_freq = dev_pm_opp_get_freq(bus->curr_opp);
+ old_volt = dev_pm_opp_get_voltage(bus->curr_opp);
+ rcu_read_unlock();
+
+ if (old_freq == new_freq)
+ return 0;
+ tol = new_volt * bus->voltage_tolerance / 100;
+
+ /* Change voltage and frequency according to new OPP level */
+ mutex_lock(&bus->lock);
+
+ if (old_freq < new_freq) {
+ ret = regulator_set_voltage_tol(bus->regulator, new_volt, tol);
+ if (ret < 0) {
+ dev_err(bus->dev, "failed to set voltage\n");
+ goto out;
+ }
+ }
+
+ ret = clk_set_rate(bus->clk, new_freq);
+ if (ret < 0) {
+ dev_err(dev, "failed to change clock of bus\n");
+ clk_set_rate(bus->clk, old_freq);
+ goto out;
+ }
+
+ if (old_freq > new_freq) {
+ ret = regulator_set_voltage_tol(bus->regulator, new_volt, tol);
+ if (ret < 0) {
+ dev_err(bus->dev, "failed to set voltage\n");
+ goto out;
+ }
+ }
+ bus->curr_opp = new_opp;
+
+ dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
+ old_freq/1000, new_freq/1000);
+out:
+ mutex_unlock(&bus->lock);
+
+ return ret;
+}
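exynos_bus_target() follows the usual DVFS ordering: raise the voltage before raising the clock, and lower it only after the clock has been lowered, so the bus never runs faster than the current voltage supports. A condensed sketch of that rule under the same driver context (illustrative only; the helper name is hypothetical):

/* Illustrative DVFS ordering, mirroring exynos_bus_target() */
static int exynos_bus_scale(struct exynos_bus *bus, unsigned long old_freq,
			    unsigned long new_freq, unsigned long new_volt,
			    unsigned long tol)
{
	int ret;

	if (old_freq < new_freq) {	/* scaling up: raise voltage first */
		ret = regulator_set_voltage_tol(bus->regulator, new_volt, tol);
		if (ret < 0)
			return ret;
	}

	ret = clk_set_rate(bus->clk, new_freq);
	if (ret < 0)
		return ret;

	if (old_freq > new_freq)	/* scaling down: lower voltage last */
		ret = regulator_set_voltage_tol(bus->regulator, new_volt, tol);

	return ret;
}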
+
+static int exynos_bus_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct exynos_bus *bus = dev_get_drvdata(dev);
+ struct devfreq_event_data edata;
+ int ret;
+
+ rcu_read_lock();
+ stat->current_frequency = dev_pm_opp_get_freq(bus->curr_opp);
+ rcu_read_unlock();
+
+ ret = exynos_bus_get_event(bus, &edata);
+ if (ret < 0) {
+ stat->total_time = stat->busy_time = 0;
+ goto err;
+ }
+
+ stat->busy_time = (edata.load_count * 100) / bus->ratio;
+ stat->total_time = edata.total_count;
+
+ dev_dbg(dev, "Usage of devfreq-event : %lu/%lu\n", stat->busy_time,
+ stat->total_time);
+
+err:
+ ret = exynos_bus_set_event(bus);
+ if (ret < 0) {
+ dev_err(dev, "failed to set event to devfreq-event devices\n");
+ return ret;
+ }
+
+ return ret;
+}
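With the default saturation ratio of 40, a measured load of 2000 out of 10000 total cycles is reported to the governor as busy_time = 2000 * 100 / 40 = 5000, i.e. 50% of total_time rather than 20%. A small standalone arithmetic check with those illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned long load_count = 2000, total_count = 10000, ratio = 40;

	/* Same scaling as exynos_bus_get_dev_status() */
	unsigned long busy_time = load_count * 100 / ratio;

	printf("busy=%lu total=%lu (%lu%%)\n",
	       busy_time, total_count, busy_time * 100 / total_count);
	return 0;
}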
+
+static void exynos_bus_exit(struct device *dev)
+{
+ struct exynos_bus *bus = dev_get_drvdata(dev);
+ int ret;
+
+ ret = exynos_bus_disable_edev(bus);
+ if (ret < 0)
+ dev_warn(dev, "failed to disable the devfreq-event devices\n");
+
+ if (bus->regulator)
+ regulator_disable(bus->regulator);
+
+ dev_pm_opp_of_remove_table(dev);
+ clk_disable_unprepare(bus->clk);
+}
+
+/*
+ * Function required by the devfreq passive governor
+ */
+static int exynos_bus_passive_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct exynos_bus *bus = dev_get_drvdata(dev);
+ struct dev_pm_opp *new_opp;
+ unsigned long old_freq, new_freq;
+ int ret = 0;
+
+ /* Get the new OPP instance corresponding to the new bus clock rate */
+ rcu_read_lock();
+ new_opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(new_opp)) {
+ dev_err(dev, "failed to get recommended opp instance\n");
+ rcu_read_unlock();
+ return PTR_ERR(new_opp);
+ }
+
+ new_freq = dev_pm_opp_get_freq(new_opp);
+ old_freq = dev_pm_opp_get_freq(bus->curr_opp);
+ rcu_read_unlock();
+
+ if (old_freq == new_freq)
+ return 0;
+
+ /* Change the frequency according to new OPP level */
+ mutex_lock(&bus->lock);
+
+ ret = clk_set_rate(bus->clk, new_freq);
+ if (ret < 0) {
+ dev_err(dev, "failed to set the clock of bus\n");
+ goto out;
+ }
+
+ *freq = new_freq;
+ bus->curr_opp = new_opp;
+
+ dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
+ old_freq/1000, new_freq/1000);
+out:
+ mutex_unlock(&bus->lock);
+
+ return ret;
+}
+
+static void exynos_bus_passive_exit(struct device *dev)
+{
+ struct exynos_bus *bus = dev_get_drvdata(dev);
+
+ dev_pm_opp_of_remove_table(dev);
+ clk_disable_unprepare(bus->clk);
+}
+
+static int exynos_bus_parent_parse_of(struct device_node *np,
+ struct exynos_bus *bus)
+{
+ struct device *dev = bus->dev;
+ int i, ret, count, size;
+
+ /* Get the regulator to provide each bus with the power */
+ bus->regulator = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(bus->regulator)) {
+ dev_err(dev, "failed to get VDD regulator\n");
+ return PTR_ERR(bus->regulator);
+ }
+
+ ret = regulator_enable(bus->regulator);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable VDD regulator\n");
+ return ret;
+ }
+
+ /*
+ * Get the devfreq-event devices used to measure the current utilization
+ * of the buses. This raw data feeds the devfreq simple-ondemand governor.
+ */
+ count = devfreq_event_get_edev_count(dev);
+ if (count < 0) {
+ dev_err(dev, "failed to get the count of devfreq-event dev\n");
+ ret = count;
+ goto err_regulator;
+ }
+ bus->edev_count = count;
+
+ size = sizeof(*bus->edev) * count;
+ bus->edev = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!bus->edev) {
+ ret = -ENOMEM;
+ goto err_regulator;
+ }
+
+ for (i = 0; i < count; i++) {
+ bus->edev[i] = devfreq_event_get_edev_by_phandle(dev, i);
+ if (IS_ERR(bus->edev[i])) {
+ ret = -EPROBE_DEFER;
+ goto err_regulator;
+ }
+ }
+
+ /*
+ * Optionally, get the saturation ratio according to the Exynos SoC.
+ * When measuring the utilization of each AXI bus with the devfreq-event
+ * devices, the measured real cycle count might be much lower than the
+ * total cycle count of the bus during the sampling period. As a result,
+ * the devfreq simple-ondemand governor might not decide to change the
+ * current frequency because the utilization (= real cycles / total
+ * cycles) is too low. So this property is used to adjust the utilization
+ * when calculating busy_time in exynos_bus_get_dev_status().
+ */
+ if (of_property_read_u32(np, "exynos,saturation-ratio", &bus->ratio))
+ bus->ratio = DEFAULT_SATURATION_RATIO;
+
+ if (of_property_read_u32(np, "exynos,voltage-tolerance",
+ &bus->voltage_tolerance))
+ bus->voltage_tolerance = DEFAULT_VOLTAGE_TOLERANCE;
+
+ return 0;
+
+err_regulator:
+ regulator_disable(bus->regulator);
+
+ return ret;
+}
+
+static int exynos_bus_parse_of(struct device_node *np,
+ struct exynos_bus *bus)
+{
+ struct device *dev = bus->dev;
+ unsigned long rate;
+ int ret;
+
+ /* Get the clock to provide each bus with source clock */
+ bus->clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(bus->clk)) {
+ dev_err(dev, "failed to get bus clock\n");
+ return PTR_ERR(bus->clk);
+ }
+
+ ret = clk_prepare_enable(bus->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to get enable clock\n");
+ return ret;
+ }
+
+ /* Get the freq and voltage from OPP table to scale the bus freq */
+ rcu_read_lock();
+ ret = dev_pm_opp_of_add_table(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get OPP table\n");
+ rcu_read_unlock();
+ goto err_clk;
+ }
+
+ rate = clk_get_rate(bus->clk);
+ bus->curr_opp = devfreq_recommended_opp(dev, &rate, 0);
+ if (IS_ERR(bus->curr_opp)) {
+ dev_err(dev, "failed to find dev_pm_opp\n");
+ rcu_read_unlock();
+ ret = PTR_ERR(bus->curr_opp);
+ goto err_opp;
+ }
+ rcu_read_unlock();
+
+ return 0;
+
+err_opp:
+ dev_pm_opp_of_remove_table(dev);
+err_clk:
+ clk_disable_unprepare(bus->clk);
+
+ return ret;
+}
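Once the OPP table is registered, the current operating point is derived from the clock rate via devfreq_recommended_opp(), under the RCU locking that the OPP API of this kernel generation requires. A minimal sketch of that lookup (illustrative only; the helper name is hypothetical and a populated OPP table for the device is assumed):

/* Illustrative lookup of the OPP that matches the current clock rate */
static struct dev_pm_opp *exynos_bus_find_current_opp(struct device *dev,
						      struct clk *clk)
{
	struct dev_pm_opp *opp;
	unsigned long rate = clk_get_rate(clk);

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, &rate, 0);
	rcu_read_unlock();

	return opp;	/* ERR_PTR() on failure, as in exynos_bus_parse_of() */
}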
+
+static int exynos_bus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct devfreq_dev_profile *profile;
+ struct devfreq_simple_ondemand_data *ondemand_data;
+ struct devfreq_passive_data *passive_data;
+ struct devfreq *parent_devfreq;
+ struct exynos_bus *bus;
+ int ret, max_state;
+ unsigned long min_freq, max_freq;
+
+ if (!np) {
+ dev_err(dev, "failed to find devicetree node\n");
+ return -EINVAL;
+ }
+
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+ mutex_init(&bus->lock);
+ bus->dev = &pdev->dev;
+ platform_set_drvdata(pdev, bus);
+
+ /* Parse the device-tree to get the resource information */
+ ret = exynos_bus_parse_of(np, bus);
+ if (ret < 0)
+ goto err;
+
+ profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+ if (!profile) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (of_parse_phandle(dev->of_node, "devfreq", 0))
+ goto passive;
+ else
+ ret = exynos_bus_parent_parse_of(np, bus);
+
+ if (ret < 0)
+ goto err;
+
+ /* Initialize the struct profile and governor data for parent device */
+ profile->polling_ms = 50;
+ profile->target = exynos_bus_target;
+ profile->get_dev_status = exynos_bus_get_dev_status;
+ profile->exit = exynos_bus_exit;
+
+ ondemand_data = devm_kzalloc(dev, sizeof(*ondemand_data), GFP_KERNEL);
+ if (!ondemand_data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ondemand_data->upthreshold = 40;
+ ondemand_data->downdifferential = 5;
+
+ /* Add devfreq device to monitor and handle the exynos bus */
+ bus->devfreq = devm_devfreq_add_device(dev, profile, "simple_ondemand",
+ ondemand_data);
+ if (IS_ERR(bus->devfreq)) {
+ dev_err(dev, "failed to add devfreq device\n");
+ ret = PTR_ERR(bus->devfreq);
+ goto err;
+ }
+
+ /* Register opp_notifier to catch the change of OPP */
+ ret = devm_devfreq_register_opp_notifier(dev, bus->devfreq);
+ if (ret < 0) {
+ dev_err(dev, "failed to register opp notifier\n");
+ goto err;
+ }
+
+ /*
+ * Enable the devfreq-event devices to get the raw data that is used to
+ * determine the current bus load.
+ */
+ ret = exynos_bus_enable_edev(bus);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable devfreq-event devices\n");
+ goto err;
+ }
+
+ ret = exynos_bus_set_event(bus);
+ if (ret < 0) {
+ dev_err(dev, "failed to set event to devfreq-event devices\n");
+ goto err;
+ }
+
+ goto out;
+passive:
+ /* Initialize the struct profile and governor data for passive device */
+ profile->target = exynos_bus_passive_target;
+ profile->exit = exynos_bus_passive_exit;
+
+ /* Get the instance of parent devfreq device */
+ parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0);
+ if (IS_ERR(parent_devfreq)) {
+ ret = -EPROBE_DEFER;
+ goto err;
+ }
+
+ passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL);
+ if (!passive_data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ passive_data->parent = parent_devfreq;
+
+ /* Add devfreq device for exynos bus with passive governor */
+ bus->devfreq = devm_devfreq_add_device(dev, profile, "passive",
+ passive_data);
+ if (IS_ERR(bus->devfreq)) {
+ dev_err(dev,
+ "failed to add devfreq dev with passive governor\n");
+ ret = -EPROBE_DEFER;
+ goto err;
+ }
+
+out:
+ max_state = bus->devfreq->profile->max_state;
+ min_freq = (bus->devfreq->profile->freq_table[0] / 1000);
+ max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000);
+ pr_info("exynos-bus: new bus device registered: %s (%6ld KHz ~ %6ld KHz)\n",
+ dev_name(dev), min_freq, max_freq);
+
+ return 0;
+
+err:
+ dev_pm_opp_of_remove_table(dev);
+ clk_disable_unprepare(bus->clk);
+
+ return ret;
+}
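exynos_bus_probe() chooses between the two roles by checking for a "devfreq" phandle: a node that points at a parent devfreq device is registered with the passive governor, otherwise the node becomes the parent using simple-ondemand. A condensed sketch of that check (illustrative only; the helper name is hypothetical):

/* Illustrative role selection, mirroring exynos_bus_probe() */
static bool exynos_bus_is_passive(struct device_node *np)
{
	/* The "devfreq" phandle names the parent devfreq device to follow */
	struct device_node *parent = of_parse_phandle(np, "devfreq", 0);

	of_node_put(parent);	/* of_node_put() accepts NULL */
	return parent != NULL;
}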
+
+#ifdef CONFIG_PM_SLEEP
+static int exynos_bus_resume(struct device *dev)
+{
+ struct exynos_bus *bus = dev_get_drvdata(dev);
+ int ret;
+
+ ret = exynos_bus_enable_edev(bus);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable the devfreq-event devices\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int exynos_bus_suspend(struct device *dev)
+{
+ struct exynos_bus *bus = dev_get_drvdata(dev);
+ int ret;
+
+ ret = exynos_bus_disable_edev(bus);
+ if (ret < 0) {
+ dev_err(dev, "failed to disable the devfreq-event devices\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops exynos_bus_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(exynos_bus_suspend, exynos_bus_resume)
+};
+
+static const struct of_device_id exynos_bus_of_match[] = {
+ { .compatible = "samsung,exynos-bus", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, exynos_bus_of_match);
+
+static struct platform_driver exynos_bus_platdrv = {
+ .probe = exynos_bus_probe,
+ .driver = {
+ .name = "exynos-bus",
+ .pm = &exynos_bus_pm,
+ .of_match_table = of_match_ptr(exynos_bus_of_match),
+ },
+};
+module_platform_driver(exynos_bus_platdrv);
+
+MODULE_DESCRIPTION("Generic Exynos Bus frequency driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/exynos/Makefile b/drivers/devfreq/exynos/Makefile
deleted file mode 100644
index 49bc9175f923..000000000000
--- a/drivers/devfreq/exynos/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# Exynos DEVFREQ Drivers
-obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos_ppmu.o exynos4_bus.o
-obj-$(CONFIG_ARM_EXYNOS5_BUS_DEVFREQ) += exynos_ppmu.o exynos5_bus.o
diff --git a/drivers/devfreq/exynos/exynos4_bus.c b/drivers/devfreq/exynos/exynos4_bus.c
deleted file mode 100644
index da9509205169..000000000000
--- a/drivers/devfreq/exynos/exynos4_bus.c
+++ /dev/null
@@ -1,1055 +0,0 @@
-/* drivers/devfreq/exynos4210_memorybus.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * EXYNOS4 - Memory/Bus clock frequency scaling support in DEVFREQ framework
- * This version supports EXYNOS4210 only. This changes bus frequencies
- * and vddint voltages. Exynos4412/4212 should be able to be supported
- * with minor modifications.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/suspend.h>
-#include <linux/pm_opp.h>
-#include <linux/devfreq.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-#include <linux/module.h>
-
-#include <mach/map.h>
-
-#include "exynos_ppmu.h"
-#include "exynos4_bus.h"
-
-#define MAX_SAFEVOLT 1200000 /* 1.2V */
-
-enum exynos4_busf_type {
- TYPE_BUSF_EXYNOS4210,
- TYPE_BUSF_EXYNOS4x12,
-};
-
-/* Assume that the bus is saturated if the utilization is 40% */
-#define BUS_SATURATION_RATIO 40
-
-enum busclk_level_idx {
- LV_0 = 0,
- LV_1,
- LV_2,
- LV_3,
- LV_4,
- _LV_END
-};
-
-enum exynos_ppmu_idx {
- PPMU_DMC0,
- PPMU_DMC1,
- PPMU_END,
-};
-
-#define EX4210_LV_MAX LV_2
-#define EX4x12_LV_MAX LV_4
-#define EX4210_LV_NUM (LV_2 + 1)
-#define EX4x12_LV_NUM (LV_4 + 1)
-
-/**
- * struct busfreq_opp_info - opp information for bus
- * @rate: Frequency in hertz
- * @volt: Voltage in microvolts corresponding to this OPP
- */
-struct busfreq_opp_info {
- unsigned long rate;
- unsigned long volt;
-};
-
-struct busfreq_data {
- enum exynos4_busf_type type;
- struct device *dev;
- struct devfreq *devfreq;
- bool disabled;
- struct regulator *vdd_int;
- struct regulator *vdd_mif; /* Exynos4412/4212 only */
- struct busfreq_opp_info curr_oppinfo;
- struct busfreq_ppmu_data ppmu_data;
-
- struct notifier_block pm_notifier;
- struct mutex lock;
-
- /* Dividers calculated at boot/probe-time */
- unsigned int dmc_divtable[_LV_END]; /* DMC0 */
- unsigned int top_divtable[_LV_END];
-};
-
-/* 4210 controls clock of mif and voltage of int */
-static struct bus_opp_table exynos4210_busclk_table[] = {
- {LV_0, 400000, 1150000},
- {LV_1, 267000, 1050000},
- {LV_2, 133000, 1025000},
- {0, 0, 0},
-};
-
-/*
- * MIF is the main control knob clock for Exynos4x12 MIF/INT
- * clock and voltage of both mif/int are controlled.
- */
-static struct bus_opp_table exynos4x12_mifclk_table[] = {
- {LV_0, 400000, 1100000},
- {LV_1, 267000, 1000000},
- {LV_2, 160000, 950000},
- {LV_3, 133000, 950000},
- {LV_4, 100000, 950000},
- {0, 0, 0},
-};
-
-/*
- * INT is not the control knob of 4x12. LV_x is not meant to represent
- * the current performance. (MIF does)
- */
-static struct bus_opp_table exynos4x12_intclk_table[] = {
- {LV_0, 200000, 1000000},
- {LV_1, 160000, 950000},
- {LV_2, 133000, 925000},
- {LV_3, 100000, 900000},
- {0, 0, 0},
-};
-
-/* TODO: asv volt definitions are "__initdata"? */
-/* Some chips have different operating voltages */
-static unsigned int exynos4210_asv_volt[][EX4210_LV_NUM] = {
- {1150000, 1050000, 1050000},
- {1125000, 1025000, 1025000},
- {1100000, 1000000, 1000000},
- {1075000, 975000, 975000},
- {1050000, 950000, 950000},
-};
-
-static unsigned int exynos4x12_mif_step_50[][EX4x12_LV_NUM] = {
- /* 400 267 160 133 100 */
- {1050000, 950000, 900000, 900000, 900000}, /* ASV0 */
- {1050000, 950000, 900000, 900000, 900000}, /* ASV1 */
- {1050000, 950000, 900000, 900000, 900000}, /* ASV2 */
- {1050000, 900000, 900000, 900000, 900000}, /* ASV3 */
- {1050000, 900000, 900000, 900000, 850000}, /* ASV4 */
- {1050000, 900000, 900000, 850000, 850000}, /* ASV5 */
- {1050000, 900000, 850000, 850000, 850000}, /* ASV6 */
- {1050000, 900000, 850000, 850000, 850000}, /* ASV7 */
- {1050000, 900000, 850000, 850000, 850000}, /* ASV8 */
-};
-
-static unsigned int exynos4x12_int_volt[][EX4x12_LV_NUM] = {
- /* 200 160 133 100 */
- {1000000, 950000, 925000, 900000}, /* ASV0 */
- {975000, 925000, 925000, 900000}, /* ASV1 */
- {950000, 925000, 900000, 875000}, /* ASV2 */
- {950000, 900000, 900000, 875000}, /* ASV3 */
- {925000, 875000, 875000, 875000}, /* ASV4 */
- {900000, 850000, 850000, 850000}, /* ASV5 */
- {900000, 850000, 850000, 850000}, /* ASV6 */
- {900000, 850000, 850000, 850000}, /* ASV7 */
- {900000, 850000, 850000, 850000}, /* ASV8 */
-};
-
-/*** Clock Divider Data for Exynos4210 ***/
-static unsigned int exynos4210_clkdiv_dmc0[][8] = {
- /*
- * Clock divider value for following
- * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
- * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
- */
-
- /* DMC L0: 400MHz */
- { 3, 1, 1, 1, 1, 1, 3, 1 },
- /* DMC L1: 266.7MHz */
- { 4, 1, 1, 2, 1, 1, 3, 1 },
- /* DMC L2: 133MHz */
- { 5, 1, 1, 5, 1, 1, 3, 1 },
-};
-static unsigned int exynos4210_clkdiv_top[][5] = {
- /*
- * Clock divider value for following
- * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
- */
- /* ACLK200 L0: 200MHz */
- { 3, 7, 4, 5, 1 },
- /* ACLK200 L1: 160MHz */
- { 4, 7, 5, 6, 1 },
- /* ACLK200 L2: 133MHz */
- { 5, 7, 7, 7, 1 },
-};
-static unsigned int exynos4210_clkdiv_lr_bus[][2] = {
- /*
- * Clock divider value for following
- * { DIVGDL/R, DIVGPL/R }
- */
- /* ACLK_GDL/R L1: 200MHz */
- { 3, 1 },
- /* ACLK_GDL/R L2: 160MHz */
- { 4, 1 },
- /* ACLK_GDL/R L3: 133MHz */
- { 5, 1 },
-};
-
-/*** Clock Divider Data for Exynos4212/4412 ***/
-static unsigned int exynos4x12_clkdiv_dmc0[][6] = {
- /*
- * Clock divider value for following
- * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
- * DIVDMCP}
- */
-
- /* DMC L0: 400MHz */
- {3, 1, 1, 1, 1, 1},
- /* DMC L1: 266.7MHz */
- {4, 1, 1, 2, 1, 1},
- /* DMC L2: 160MHz */
- {5, 1, 1, 4, 1, 1},
- /* DMC L3: 133MHz */
- {5, 1, 1, 5, 1, 1},
- /* DMC L4: 100MHz */
- {7, 1, 1, 7, 1, 1},
-};
-static unsigned int exynos4x12_clkdiv_dmc1[][6] = {
- /*
- * Clock divider value for following
- * { G2DACP, DIVC2C, DIVC2C_ACLK }
- */
-
- /* DMC L0: 400MHz */
- {3, 1, 1},
- /* DMC L1: 266.7MHz */
- {4, 2, 1},
- /* DMC L2: 160MHz */
- {5, 4, 1},
- /* DMC L3: 133MHz */
- {5, 5, 1},
- /* DMC L4: 100MHz */
- {7, 7, 1},
-};
-static unsigned int exynos4x12_clkdiv_top[][5] = {
- /*
- * Clock divider value for following
- * { DIVACLK266_GPS, DIVACLK100, DIVACLK160,
- DIVACLK133, DIVONENAND }
- */
-
- /* ACLK_GDL/R L0: 200MHz */
- {2, 7, 4, 5, 1},
- /* ACLK_GDL/R L1: 200MHz */
- {2, 7, 4, 5, 1},
- /* ACLK_GDL/R L2: 160MHz */
- {4, 7, 5, 7, 1},
- /* ACLK_GDL/R L3: 133MHz */
- {4, 7, 5, 7, 1},
- /* ACLK_GDL/R L4: 100MHz */
- {7, 7, 7, 7, 1},
-};
-static unsigned int exynos4x12_clkdiv_lr_bus[][2] = {
- /*
- * Clock divider value for following
- * { DIVGDL/R, DIVGPL/R }
- */
-
- /* ACLK_GDL/R L0: 200MHz */
- {3, 1},
- /* ACLK_GDL/R L1: 200MHz */
- {3, 1},
- /* ACLK_GDL/R L2: 160MHz */
- {4, 1},
- /* ACLK_GDL/R L3: 133MHz */
- {5, 1},
- /* ACLK_GDL/R L4: 100MHz */
- {7, 1},
-};
-static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
- /*
- * Clock divider value for following
- * { DIVMFC, DIVJPEG, DIVFIMC0~3}
- */
-
- /* SCLK_MFC: 200MHz */
- {3, 3, 4},
- /* SCLK_MFC: 200MHz */
- {3, 3, 4},
- /* SCLK_MFC: 160MHz */
- {4, 4, 5},
- /* SCLK_MFC: 133MHz */
- {5, 5, 5},
- /* SCLK_MFC: 100MHz */
- {7, 7, 7},
-};
-
-
-static int exynos4210_set_busclk(struct busfreq_data *data,
- struct busfreq_opp_info *oppi)
-{
- unsigned int index;
- unsigned int tmp;
-
- for (index = LV_0; index < EX4210_LV_NUM; index++)
- if (oppi->rate == exynos4210_busclk_table[index].clk)
- break;
-
- if (index == EX4210_LV_NUM)
- return -EINVAL;
-
- /* Change Divider - DMC0 */
- tmp = data->dmc_divtable[index];
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_DMC0);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_DMC0);
- } while (tmp & 0x11111111);
-
- /* Change Divider - TOP */
- tmp = data->top_divtable[index];
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_TOP);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_TOP);
- } while (tmp & 0x11111);
-
- /* Change Divider - LEFTBUS */
- tmp = __raw_readl(EXYNOS4_CLKDIV_LEFTBUS);
-
- tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
-
- tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
- EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
- (exynos4210_clkdiv_lr_bus[index][1] <<
- EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_LEFTBUS);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_LEFTBUS);
- } while (tmp & 0x11);
-
- /* Change Divider - RIGHTBUS */
- tmp = __raw_readl(EXYNOS4_CLKDIV_RIGHTBUS);
-
- tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
-
- tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
- EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
- (exynos4210_clkdiv_lr_bus[index][1] <<
- EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_RIGHTBUS);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_RIGHTBUS);
- } while (tmp & 0x11);
-
- return 0;
-}
-
-static int exynos4x12_set_busclk(struct busfreq_data *data,
- struct busfreq_opp_info *oppi)
-{
- unsigned int index;
- unsigned int tmp;
-
- for (index = LV_0; index < EX4x12_LV_NUM; index++)
- if (oppi->rate == exynos4x12_mifclk_table[index].clk)
- break;
-
- if (index == EX4x12_LV_NUM)
- return -EINVAL;
-
- /* Change Divider - DMC0 */
- tmp = data->dmc_divtable[index];
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_DMC0);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_DMC0);
- } while (tmp & 0x11111111);
-
- /* Change Divider - DMC1 */
- tmp = __raw_readl(EXYNOS4_CLKDIV_DMC1);
-
- tmp &= ~(EXYNOS4_CLKDIV_DMC1_G2D_ACP_MASK |
- EXYNOS4_CLKDIV_DMC1_C2C_MASK |
- EXYNOS4_CLKDIV_DMC1_C2CACLK_MASK);
-
- tmp |= ((exynos4x12_clkdiv_dmc1[index][0] <<
- EXYNOS4_CLKDIV_DMC1_G2D_ACP_SHIFT) |
- (exynos4x12_clkdiv_dmc1[index][1] <<
- EXYNOS4_CLKDIV_DMC1_C2C_SHIFT) |
- (exynos4x12_clkdiv_dmc1[index][2] <<
- EXYNOS4_CLKDIV_DMC1_C2CACLK_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_DMC1);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_DMC1);
- } while (tmp & 0x111111);
-
- /* Change Divider - TOP */
- tmp = __raw_readl(EXYNOS4_CLKDIV_TOP);
-
- tmp &= ~(EXYNOS4_CLKDIV_TOP_ACLK266_GPS_MASK |
- EXYNOS4_CLKDIV_TOP_ACLK100_MASK |
- EXYNOS4_CLKDIV_TOP_ACLK160_MASK |
- EXYNOS4_CLKDIV_TOP_ACLK133_MASK |
- EXYNOS4_CLKDIV_TOP_ONENAND_MASK);
-
- tmp |= ((exynos4x12_clkdiv_top[index][0] <<
- EXYNOS4_CLKDIV_TOP_ACLK266_GPS_SHIFT) |
- (exynos4x12_clkdiv_top[index][1] <<
- EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT) |
- (exynos4x12_clkdiv_top[index][2] <<
- EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT) |
- (exynos4x12_clkdiv_top[index][3] <<
- EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT) |
- (exynos4x12_clkdiv_top[index][4] <<
- EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_TOP);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_TOP);
- } while (tmp & 0x11111);
-
- /* Change Divider - LEFTBUS */
- tmp = __raw_readl(EXYNOS4_CLKDIV_LEFTBUS);
-
- tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
-
- tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
- EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
- (exynos4x12_clkdiv_lr_bus[index][1] <<
- EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_LEFTBUS);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_LEFTBUS);
- } while (tmp & 0x11);
-
- /* Change Divider - RIGHTBUS */
- tmp = __raw_readl(EXYNOS4_CLKDIV_RIGHTBUS);
-
- tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
-
- tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
- EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
- (exynos4x12_clkdiv_lr_bus[index][1] <<
- EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_RIGHTBUS);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_RIGHTBUS);
- } while (tmp & 0x11);
-
- /* Change Divider - MFC */
- tmp = __raw_readl(EXYNOS4_CLKDIV_MFC);
-
- tmp &= ~(EXYNOS4_CLKDIV_MFC_MASK);
-
- tmp |= ((exynos4x12_clkdiv_sclkip[index][0] <<
- EXYNOS4_CLKDIV_MFC_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_MFC);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_MFC);
- } while (tmp & 0x1);
-
- /* Change Divider - JPEG */
- tmp = __raw_readl(EXYNOS4_CLKDIV_CAM1);
-
- tmp &= ~(EXYNOS4_CLKDIV_CAM1_JPEG_MASK);
-
- tmp |= ((exynos4x12_clkdiv_sclkip[index][1] <<
- EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_CAM1);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_CAM1);
- } while (tmp & 0x1);
-
- /* Change Divider - FIMC0~3 */
- tmp = __raw_readl(EXYNOS4_CLKDIV_CAM);
-
- tmp &= ~(EXYNOS4_CLKDIV_CAM_FIMC0_MASK | EXYNOS4_CLKDIV_CAM_FIMC1_MASK |
- EXYNOS4_CLKDIV_CAM_FIMC2_MASK | EXYNOS4_CLKDIV_CAM_FIMC3_MASK);
-
- tmp |= ((exynos4x12_clkdiv_sclkip[index][2] <<
- EXYNOS4_CLKDIV_CAM_FIMC0_SHIFT) |
- (exynos4x12_clkdiv_sclkip[index][2] <<
- EXYNOS4_CLKDIV_CAM_FIMC1_SHIFT) |
- (exynos4x12_clkdiv_sclkip[index][2] <<
- EXYNOS4_CLKDIV_CAM_FIMC2_SHIFT) |
- (exynos4x12_clkdiv_sclkip[index][2] <<
- EXYNOS4_CLKDIV_CAM_FIMC3_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_CAM);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_CAM1);
- } while (tmp & 0x1111);
-
- return 0;
-}
-
-static int exynos4x12_get_intspec(unsigned long mifclk)
-{
- int i = 0;
-
- while (exynos4x12_intclk_table[i].clk) {
- if (exynos4x12_intclk_table[i].clk <= mifclk)
- return i;
- i++;
- }
-
- return -EINVAL;
-}
-
-static int exynos4_bus_setvolt(struct busfreq_data *data,
- struct busfreq_opp_info *oppi,
- struct busfreq_opp_info *oldoppi)
-{
- int err = 0, tmp;
- unsigned long volt = oppi->volt;
-
- switch (data->type) {
- case TYPE_BUSF_EXYNOS4210:
- /* OPP represents DMC clock + INT voltage */
- err = regulator_set_voltage(data->vdd_int, volt,
- MAX_SAFEVOLT);
- break;
- case TYPE_BUSF_EXYNOS4x12:
- /* OPP represents MIF clock + MIF voltage */
- err = regulator_set_voltage(data->vdd_mif, volt,
- MAX_SAFEVOLT);
- if (err)
- break;
-
- tmp = exynos4x12_get_intspec(oppi->rate);
- if (tmp < 0) {
- err = tmp;
- regulator_set_voltage(data->vdd_mif,
- oldoppi->volt,
- MAX_SAFEVOLT);
- break;
- }
- err = regulator_set_voltage(data->vdd_int,
- exynos4x12_intclk_table[tmp].volt,
- MAX_SAFEVOLT);
- /* Try to recover */
- if (err)
- regulator_set_voltage(data->vdd_mif,
- oldoppi->volt,
- MAX_SAFEVOLT);
- break;
- default:
- err = -EINVAL;
- }
-
- return err;
-}
-
-static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
- u32 flags)
-{
- int err = 0;
- struct platform_device *pdev = container_of(dev, struct platform_device,
- dev);
- struct busfreq_data *data = platform_get_drvdata(pdev);
- struct dev_pm_opp *opp;
- unsigned long freq;
- unsigned long old_freq = data->curr_oppinfo.rate;
- struct busfreq_opp_info new_oppinfo;
-
- rcu_read_lock();
- opp = devfreq_recommended_opp(dev, _freq, flags);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- return PTR_ERR(opp);
- }
- new_oppinfo.rate = dev_pm_opp_get_freq(opp);
- new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
- freq = new_oppinfo.rate;
-
- if (old_freq == freq)
- return 0;
-
- dev_dbg(dev, "targeting %lukHz %luuV\n", freq, new_oppinfo.volt);
-
- mutex_lock(&data->lock);
-
- if (data->disabled)
- goto out;
-
- if (old_freq < freq)
- err = exynos4_bus_setvolt(data, &new_oppinfo,
- &data->curr_oppinfo);
- if (err)
- goto out;
-
- if (old_freq != freq) {
- switch (data->type) {
- case TYPE_BUSF_EXYNOS4210:
- err = exynos4210_set_busclk(data, &new_oppinfo);
- break;
- case TYPE_BUSF_EXYNOS4x12:
- err = exynos4x12_set_busclk(data, &new_oppinfo);
- break;
- default:
- err = -EINVAL;
- }
- }
- if (err)
- goto out;
-
- if (old_freq > freq)
- err = exynos4_bus_setvolt(data, &new_oppinfo,
- &data->curr_oppinfo);
- if (err)
- goto out;
-
- data->curr_oppinfo = new_oppinfo;
-out:
- mutex_unlock(&data->lock);
- return err;
-}
-
-static int exynos4_bus_get_dev_status(struct device *dev,
- struct devfreq_dev_status *stat)
-{
- struct busfreq_data *data = dev_get_drvdata(dev);
- struct busfreq_ppmu_data *ppmu_data = &data->ppmu_data;
- int busier;
-
- exynos_read_ppmu(ppmu_data);
- busier = exynos_get_busier_ppmu(ppmu_data);
- stat->current_frequency = data->curr_oppinfo.rate;
-
- /* Number of cycles spent on memory access */
- stat->busy_time = ppmu_data->ppmu[busier].count[PPMU_PMNCNT3];
- stat->busy_time *= 100 / BUS_SATURATION_RATIO;
- stat->total_time = ppmu_data->ppmu[busier].ccnt;
-
- /* If the counters have overflown, retry */
- if (ppmu_data->ppmu[busier].ccnt_overflow ||
- ppmu_data->ppmu[busier].count_overflow[0])
- return -EAGAIN;
-
- return 0;
-}
-
-static struct devfreq_dev_profile exynos4_devfreq_profile = {
- .initial_freq = 400000,
- .polling_ms = 50,
- .target = exynos4_bus_target,
- .get_dev_status = exynos4_bus_get_dev_status,
-};
-
-static int exynos4210_init_tables(struct busfreq_data *data)
-{
- u32 tmp;
- int mgrp;
- int i, err = 0;
-
- tmp = __raw_readl(EXYNOS4_CLKDIV_DMC0);
- for (i = LV_0; i < EX4210_LV_NUM; i++) {
- tmp &= ~(EXYNOS4_CLKDIV_DMC0_ACP_MASK |
- EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK |
- EXYNOS4_CLKDIV_DMC0_DPHY_MASK |
- EXYNOS4_CLKDIV_DMC0_DMC_MASK |
- EXYNOS4_CLKDIV_DMC0_DMCD_MASK |
- EXYNOS4_CLKDIV_DMC0_DMCP_MASK |
- EXYNOS4_CLKDIV_DMC0_COPY2_MASK |
- EXYNOS4_CLKDIV_DMC0_CORETI_MASK);
-
- tmp |= ((exynos4210_clkdiv_dmc0[i][0] <<
- EXYNOS4_CLKDIV_DMC0_ACP_SHIFT) |
- (exynos4210_clkdiv_dmc0[i][1] <<
- EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT) |
- (exynos4210_clkdiv_dmc0[i][2] <<
- EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT) |
- (exynos4210_clkdiv_dmc0[i][3] <<
- EXYNOS4_CLKDIV_DMC0_DMC_SHIFT) |
- (exynos4210_clkdiv_dmc0[i][4] <<
- EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT) |
- (exynos4210_clkdiv_dmc0[i][5] <<
- EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT) |
- (exynos4210_clkdiv_dmc0[i][6] <<
- EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT) |
- (exynos4210_clkdiv_dmc0[i][7] <<
- EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT));
-
- data->dmc_divtable[i] = tmp;
- }
-
- tmp = __raw_readl(EXYNOS4_CLKDIV_TOP);
- for (i = LV_0; i < EX4210_LV_NUM; i++) {
- tmp &= ~(EXYNOS4_CLKDIV_TOP_ACLK200_MASK |
- EXYNOS4_CLKDIV_TOP_ACLK100_MASK |
- EXYNOS4_CLKDIV_TOP_ACLK160_MASK |
- EXYNOS4_CLKDIV_TOP_ACLK133_MASK |
- EXYNOS4_CLKDIV_TOP_ONENAND_MASK);
-
- tmp |= ((exynos4210_clkdiv_top[i][0] <<
- EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT) |
- (exynos4210_clkdiv_top[i][1] <<
- EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT) |
- (exynos4210_clkdiv_top[i][2] <<
- EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT) |
- (exynos4210_clkdiv_top[i][3] <<
- EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT) |
- (exynos4210_clkdiv_top[i][4] <<
- EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT));
-
- data->top_divtable[i] = tmp;
- }
-
- /*
- * TODO: init tmp based on busfreq_data
- * (device-tree or platform-data)
- */
- tmp = 0; /* Max voltages for the reliability of the unknown */
-
- pr_debug("ASV Group of Exynos4 is %d\n", tmp);
- /* Use merged grouping for voltage */
- switch (tmp) {
- case 0:
- mgrp = 0;
- break;
- case 1:
- case 2:
- mgrp = 1;
- break;
- case 3:
- case 4:
- mgrp = 2;
- break;
- case 5:
- case 6:
- mgrp = 3;
- break;
- case 7:
- mgrp = 4;
- break;
- default:
- pr_warn("Unknown ASV Group. Use max voltage.\n");
- mgrp = 0;
- }
-
- for (i = LV_0; i < EX4210_LV_NUM; i++)
- exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
-
- for (i = LV_0; i < EX4210_LV_NUM; i++) {
- err = dev_pm_opp_add(data->dev, exynos4210_busclk_table[i].clk,
- exynos4210_busclk_table[i].volt);
- if (err) {
- dev_err(data->dev, "Cannot add opp entries.\n");
- return err;
- }
- }
-
-
- return 0;
-}
-
-static int exynos4x12_init_tables(struct busfreq_data *data)
-{
- unsigned int i;
- unsigned int tmp;
- int ret;
-
- /* Enable pause function for DREX2 DVFS */
- tmp = __raw_readl(EXYNOS4_DMC_PAUSE_CTRL);
- tmp |= EXYNOS4_DMC_PAUSE_ENABLE;
- __raw_writel(tmp, EXYNOS4_DMC_PAUSE_CTRL);
-
- tmp = __raw_readl(EXYNOS4_CLKDIV_DMC0);
-
- for (i = 0; i < EX4x12_LV_NUM; i++) {
- tmp &= ~(EXYNOS4_CLKDIV_DMC0_ACP_MASK |
- EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK |
- EXYNOS4_CLKDIV_DMC0_DPHY_MASK |
- EXYNOS4_CLKDIV_DMC0_DMC_MASK |
- EXYNOS4_CLKDIV_DMC0_DMCD_MASK |
- EXYNOS4_CLKDIV_DMC0_DMCP_MASK);
-
- tmp |= ((exynos4x12_clkdiv_dmc0[i][0] <<
- EXYNOS4_CLKDIV_DMC0_ACP_SHIFT) |
- (exynos4x12_clkdiv_dmc0[i][1] <<
- EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT) |
- (exynos4x12_clkdiv_dmc0[i][2] <<
- EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT) |
- (exynos4x12_clkdiv_dmc0[i][3] <<
- EXYNOS4_CLKDIV_DMC0_DMC_SHIFT) |
- (exynos4x12_clkdiv_dmc0[i][4] <<
- EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT) |
- (exynos4x12_clkdiv_dmc0[i][5] <<
- EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT));
-
- data->dmc_divtable[i] = tmp;
- }
-
- tmp = 0; /* Max voltages for the reliability of the unknown */
-
- if (tmp > 8)
- tmp = 0;
- pr_debug("ASV Group of Exynos4x12 is %d\n", tmp);
-
- for (i = 0; i < EX4x12_LV_NUM; i++) {
- exynos4x12_mifclk_table[i].volt =
- exynos4x12_mif_step_50[tmp][i];
- exynos4x12_intclk_table[i].volt =
- exynos4x12_int_volt[tmp][i];
- }
-
- for (i = 0; i < EX4x12_LV_NUM; i++) {
- ret = dev_pm_opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
- exynos4x12_mifclk_table[i].volt);
- if (ret) {
- dev_err(data->dev, "Fail to add opp entries.\n");
- return ret;
- }
- }
-
- return 0;
-}
-
-static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct busfreq_data *data = container_of(this, struct busfreq_data,
- pm_notifier);
- struct dev_pm_opp *opp;
- struct busfreq_opp_info new_oppinfo;
- unsigned long maxfreq = ULONG_MAX;
- int err = 0;
-
- switch (event) {
- case PM_SUSPEND_PREPARE:
- /* Set Fastest and Deactivate DVFS */
- mutex_lock(&data->lock);
-
- data->disabled = true;
-
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- dev_err(data->dev, "%s: unable to find a min freq\n",
- __func__);
- mutex_unlock(&data->lock);
- return PTR_ERR(opp);
- }
- new_oppinfo.rate = dev_pm_opp_get_freq(opp);
- new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
-
- err = exynos4_bus_setvolt(data, &new_oppinfo,
- &data->curr_oppinfo);
- if (err)
- goto unlock;
-
- switch (data->type) {
- case TYPE_BUSF_EXYNOS4210:
- err = exynos4210_set_busclk(data, &new_oppinfo);
- break;
- case TYPE_BUSF_EXYNOS4x12:
- err = exynos4x12_set_busclk(data, &new_oppinfo);
- break;
- default:
- err = -EINVAL;
- }
- if (err)
- goto unlock;
-
- data->curr_oppinfo = new_oppinfo;
-unlock:
- mutex_unlock(&data->lock);
- if (err)
- return err;
- return NOTIFY_OK;
- case PM_POST_RESTORE:
- case PM_POST_SUSPEND:
- /* Reactivate */
- mutex_lock(&data->lock);
- data->disabled = false;
- mutex_unlock(&data->lock);
- return NOTIFY_OK;
- }
-
- return NOTIFY_DONE;
-}
-
-static int exynos4_busfreq_probe(struct platform_device *pdev)
-{
- struct busfreq_data *data;
- struct busfreq_ppmu_data *ppmu_data;
- struct dev_pm_opp *opp;
- struct device *dev = &pdev->dev;
- int err = 0;
-
- data = devm_kzalloc(&pdev->dev, sizeof(struct busfreq_data), GFP_KERNEL);
- if (data == NULL) {
- dev_err(dev, "Cannot allocate memory.\n");
- return -ENOMEM;
- }
-
- ppmu_data = &data->ppmu_data;
- ppmu_data->ppmu_end = PPMU_END;
- ppmu_data->ppmu = devm_kzalloc(dev,
- sizeof(struct exynos_ppmu) * PPMU_END,
- GFP_KERNEL);
- if (!ppmu_data->ppmu) {
- dev_err(dev, "Failed to allocate memory for exynos_ppmu\n");
- return -ENOMEM;
- }
-
- data->type = pdev->id_entry->driver_data;
- ppmu_data->ppmu[PPMU_DMC0].hw_base = S5P_VA_DMC0;
- ppmu_data->ppmu[PPMU_DMC1].hw_base = S5P_VA_DMC1;
- data->pm_notifier.notifier_call = exynos4_busfreq_pm_notifier_event;
- data->dev = dev;
- mutex_init(&data->lock);
-
- switch (data->type) {
- case TYPE_BUSF_EXYNOS4210:
- err = exynos4210_init_tables(data);
- break;
- case TYPE_BUSF_EXYNOS4x12:
- err = exynos4x12_init_tables(data);
- break;
- default:
- dev_err(dev, "Cannot determine the device id %d\n", data->type);
- err = -EINVAL;
- }
- if (err) {
- dev_err(dev, "Cannot initialize busfreq table %d\n",
- data->type);
- return err;
- }
-
- data->vdd_int = devm_regulator_get(dev, "vdd_int");
- if (IS_ERR(data->vdd_int)) {
- dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
- return PTR_ERR(data->vdd_int);
- }
- if (data->type == TYPE_BUSF_EXYNOS4x12) {
- data->vdd_mif = devm_regulator_get(dev, "vdd_mif");
- if (IS_ERR(data->vdd_mif)) {
- dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
- return PTR_ERR(data->vdd_mif);
- }
- }
-
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_floor(dev,
- &exynos4_devfreq_profile.initial_freq);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- dev_err(dev, "Invalid initial frequency %lu kHz.\n",
- exynos4_devfreq_profile.initial_freq);
- return PTR_ERR(opp);
- }
- data->curr_oppinfo.rate = dev_pm_opp_get_freq(opp);
- data->curr_oppinfo.volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
-
- platform_set_drvdata(pdev, data);
-
- data->devfreq = devm_devfreq_add_device(dev, &exynos4_devfreq_profile,
- "simple_ondemand", NULL);
- if (IS_ERR(data->devfreq))
- return PTR_ERR(data->devfreq);
-
- /*
- * Start PPMU (Performance Profiling Monitoring Unit) to check
- * utilization of each IP in the Exynos4 SoC.
- */
- busfreq_mon_reset(ppmu_data);
-
- /* Register opp_notifier for Exynos4 busfreq */
- err = devm_devfreq_register_opp_notifier(dev, data->devfreq);
- if (err < 0) {
- dev_err(dev, "Failed to register opp notifier\n");
- return err;
- }
-
- /* Register pm_notifier for Exynos4 busfreq */
- err = register_pm_notifier(&data->pm_notifier);
- if (err) {
- dev_err(dev, "Failed to setup pm notifier\n");
- return err;
- }
-
- return 0;
-}
-
-static int exynos4_busfreq_remove(struct platform_device *pdev)
-{
- struct busfreq_data *data = platform_get_drvdata(pdev);
-
- /* Unregister all of notifier chain */
- unregister_pm_notifier(&data->pm_notifier);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int exynos4_busfreq_resume(struct device *dev)
-{
- struct busfreq_data *data = dev_get_drvdata(dev);
- struct busfreq_ppmu_data *ppmu_data = &data->ppmu_data;
-
- busfreq_mon_reset(ppmu_data);
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(exynos4_busfreq_pm_ops, NULL, exynos4_busfreq_resume);
-
-static const struct platform_device_id exynos4_busfreq_id[] = {
- { "exynos4210-busfreq", TYPE_BUSF_EXYNOS4210 },
- { "exynos4412-busfreq", TYPE_BUSF_EXYNOS4x12 },
- { "exynos4212-busfreq", TYPE_BUSF_EXYNOS4x12 },
- { },
-};
-
-static struct platform_driver exynos4_busfreq_driver = {
- .probe = exynos4_busfreq_probe,
- .remove = exynos4_busfreq_remove,
- .id_table = exynos4_busfreq_id,
- .driver = {
- .name = "exynos4-busfreq",
- .pm = &exynos4_busfreq_pm_ops,
- },
-};
-
-static int __init exynos4_busfreq_init(void)
-{
- return platform_driver_register(&exynos4_busfreq_driver);
-}
-late_initcall(exynos4_busfreq_init);
-
-static void __exit exynos4_busfreq_exit(void)
-{
- platform_driver_unregister(&exynos4_busfreq_driver);
-}
-module_exit(exynos4_busfreq_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework");
-MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/devfreq/exynos/exynos4_bus.h b/drivers/devfreq/exynos/exynos4_bus.h
deleted file mode 100644
index 94c73c18d28c..000000000000
--- a/drivers/devfreq/exynos/exynos4_bus.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * EXYNOS4 BUS header
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __DEVFREQ_EXYNOS4_BUS_H
-#define __DEVFREQ_EXYNOS4_BUS_H __FILE__
-
-#include <mach/map.h>
-
-#define EXYNOS4_CLKDIV_LEFTBUS (S5P_VA_CMU + 0x04500)
-#define EXYNOS4_CLKDIV_STAT_LEFTBUS (S5P_VA_CMU + 0x04600)
-
-#define EXYNOS4_CLKDIV_RIGHTBUS (S5P_VA_CMU + 0x08500)
-#define EXYNOS4_CLKDIV_STAT_RIGHTBUS (S5P_VA_CMU + 0x08600)
-
-#define EXYNOS4_CLKDIV_TOP (S5P_VA_CMU + 0x0C510)
-#define EXYNOS4_CLKDIV_CAM (S5P_VA_CMU + 0x0C520)
-#define EXYNOS4_CLKDIV_MFC (S5P_VA_CMU + 0x0C528)
-
-#define EXYNOS4_CLKDIV_STAT_TOP (S5P_VA_CMU + 0x0C610)
-#define EXYNOS4_CLKDIV_STAT_MFC (S5P_VA_CMU + 0x0C628)
-
-#define EXYNOS4210_CLKGATE_IP_IMAGE (S5P_VA_CMU + 0x0C930)
-#define EXYNOS4212_CLKGATE_IP_IMAGE (S5P_VA_CMU + 0x04930)
-
-#define EXYNOS4_CLKDIV_DMC0 (S5P_VA_CMU + 0x10500)
-#define EXYNOS4_CLKDIV_DMC1 (S5P_VA_CMU + 0x10504)
-#define EXYNOS4_CLKDIV_STAT_DMC0 (S5P_VA_CMU + 0x10600)
-#define EXYNOS4_CLKDIV_STAT_DMC1 (S5P_VA_CMU + 0x10604)
-
-#define EXYNOS4_DMC_PAUSE_CTRL (S5P_VA_CMU + 0x11094)
-#define EXYNOS4_DMC_PAUSE_ENABLE (1 << 0)
-
-#define EXYNOS4_CLKDIV_DMC0_ACP_SHIFT (0)
-#define EXYNOS4_CLKDIV_DMC0_ACP_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_ACP_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT (4)
-#define EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT (8)
-#define EXYNOS4_CLKDIV_DMC0_DPHY_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_DMC_SHIFT (12)
-#define EXYNOS4_CLKDIV_DMC0_DMC_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DMC_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT (16)
-#define EXYNOS4_CLKDIV_DMC0_DMCD_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT (20)
-#define EXYNOS4_CLKDIV_DMC0_DMCP_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT (24)
-#define EXYNOS4_CLKDIV_DMC0_COPY2_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT (28)
-#define EXYNOS4_CLKDIV_DMC0_CORETI_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT)
-
-#define EXYNOS4_CLKDIV_DMC1_G2D_ACP_SHIFT (0)
-#define EXYNOS4_CLKDIV_DMC1_G2D_ACP_MASK (0xf << EXYNOS4_CLKDIV_DMC1_G2D_ACP_SHIFT)
-#define EXYNOS4_CLKDIV_DMC1_C2C_SHIFT (4)
-#define EXYNOS4_CLKDIV_DMC1_C2C_MASK (0x7 << EXYNOS4_CLKDIV_DMC1_C2C_SHIFT)
-#define EXYNOS4_CLKDIV_DMC1_PWI_SHIFT (8)
-#define EXYNOS4_CLKDIV_DMC1_PWI_MASK (0xf << EXYNOS4_CLKDIV_DMC1_PWI_SHIFT)
-#define EXYNOS4_CLKDIV_DMC1_C2CACLK_SHIFT (12)
-#define EXYNOS4_CLKDIV_DMC1_C2CACLK_MASK (0x7 << EXYNOS4_CLKDIV_DMC1_C2CACLK_SHIFT)
-#define EXYNOS4_CLKDIV_DMC1_DVSEM_SHIFT (16)
-#define EXYNOS4_CLKDIV_DMC1_DVSEM_MASK (0x7f << EXYNOS4_CLKDIV_DMC1_DVSEM_SHIFT)
-#define EXYNOS4_CLKDIV_DMC1_DPM_SHIFT (24)
-#define EXYNOS4_CLKDIV_DMC1_DPM_MASK (0x7f << EXYNOS4_CLKDIV_DMC1_DPM_SHIFT)
-
-#define EXYNOS4_CLKDIV_MFC_SHIFT (0)
-#define EXYNOS4_CLKDIV_MFC_MASK (0x7 << EXYNOS4_CLKDIV_MFC_SHIFT)
-
-#define EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT (0)
-#define EXYNOS4_CLKDIV_TOP_ACLK200_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT (4)
-#define EXYNOS4_CLKDIV_TOP_ACLK100_MASK (0xF << EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT (8)
-#define EXYNOS4_CLKDIV_TOP_ACLK160_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT (12)
-#define EXYNOS4_CLKDIV_TOP_ACLK133_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT (16)
-#define EXYNOS4_CLKDIV_TOP_ONENAND_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ACLK266_GPS_SHIFT (20)
-#define EXYNOS4_CLKDIV_TOP_ACLK266_GPS_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK266_GPS_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ACLK400_MCUISP_SHIFT (24)
-#define EXYNOS4_CLKDIV_TOP_ACLK400_MCUISP_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK400_MCUISP_SHIFT)
-
-#define EXYNOS4_CLKDIV_BUS_GDLR_SHIFT (0)
-#define EXYNOS4_CLKDIV_BUS_GDLR_MASK (0x7 << EXYNOS4_CLKDIV_BUS_GDLR_SHIFT)
-#define EXYNOS4_CLKDIV_BUS_GPLR_SHIFT (4)
-#define EXYNOS4_CLKDIV_BUS_GPLR_MASK (0x7 << EXYNOS4_CLKDIV_BUS_GPLR_SHIFT)
-
-#define EXYNOS4_CLKDIV_CAM_FIMC0_SHIFT (0)
-#define EXYNOS4_CLKDIV_CAM_FIMC0_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC0_SHIFT)
-#define EXYNOS4_CLKDIV_CAM_FIMC1_SHIFT (4)
-#define EXYNOS4_CLKDIV_CAM_FIMC1_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC1_SHIFT)
-#define EXYNOS4_CLKDIV_CAM_FIMC2_SHIFT (8)
-#define EXYNOS4_CLKDIV_CAM_FIMC2_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC2_SHIFT)
-#define EXYNOS4_CLKDIV_CAM_FIMC3_SHIFT (12)
-#define EXYNOS4_CLKDIV_CAM_FIMC3_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC3_SHIFT)
-
-#define EXYNOS4_CLKDIV_CAM1 (S5P_VA_CMU + 0x0C568)
-
-#define EXYNOS4_CLKDIV_STAT_CAM1 (S5P_VA_CMU + 0x0C668)
-
-#define EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT (0)
-#define EXYNOS4_CLKDIV_CAM1_JPEG_MASK (0xf << EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT)
-
-#endif /* __DEVFREQ_EXYNOS4_BUS_H */
diff --git a/drivers/devfreq/exynos/exynos5_bus.c b/drivers/devfreq/exynos/exynos5_bus.c
deleted file mode 100644
index 297ea30d4159..000000000000
--- a/drivers/devfreq/exynos/exynos5_bus.c
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * EXYNOS5 INT clock frequency scaling support using DEVFREQ framework
- * Based on work done by Jonghwan Choi <jhbird.choi@samsung.com>
- * Support for only EXYNOS5250 is present.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/devfreq.h>
-#include <linux/io.h>
-#include <linux/pm_opp.h>
-#include <linux/slab.h>
-#include <linux/suspend.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/pm_qos.h>
-#include <linux/regulator/consumer.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-
-#include "exynos_ppmu.h"
-
-#define MAX_SAFEVOLT 1100000 /* 1.10V */
-/* Assume that the bus is saturated if the utilization is 25% */
-#define INT_BUS_SATURATION_RATIO 25
-
-enum int_level_idx {
- LV_0,
- LV_1,
- LV_2,
- LV_3,
- LV_4,
- _LV_END
-};
-
-enum exynos_ppmu_list {
- PPMU_RIGHT,
- PPMU_END,
-};
-
-struct busfreq_data_int {
- struct device *dev;
- struct devfreq *devfreq;
- struct regulator *vdd_int;
- struct busfreq_ppmu_data ppmu_data;
- unsigned long curr_freq;
- bool disabled;
-
- struct notifier_block pm_notifier;
- struct mutex lock;
- struct pm_qos_request int_req;
- struct clk *int_clk;
-};
-
-struct int_bus_opp_table {
- unsigned int idx;
- unsigned long clk;
- unsigned long volt;
-};
-
-static struct int_bus_opp_table exynos5_int_opp_table[] = {
- {LV_0, 266000, 1025000},
- {LV_1, 200000, 1025000},
- {LV_2, 160000, 1025000},
- {LV_3, 133000, 1025000},
- {LV_4, 100000, 1025000},
- {0, 0, 0},
-};
-
-static int exynos5_int_setvolt(struct busfreq_data_int *data,
- unsigned long volt)
-{
- return regulator_set_voltage(data->vdd_int, volt, MAX_SAFEVOLT);
-}
-
-static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
- u32 flags)
-{
- int err = 0;
- struct platform_device *pdev = container_of(dev, struct platform_device,
- dev);
- struct busfreq_data_int *data = platform_get_drvdata(pdev);
- struct dev_pm_opp *opp;
- unsigned long old_freq, freq;
- unsigned long volt;
-
- rcu_read_lock();
- opp = devfreq_recommended_opp(dev, _freq, flags);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- dev_err(dev, "%s: Invalid OPP.\n", __func__);
- return PTR_ERR(opp);
- }
-
- freq = dev_pm_opp_get_freq(opp);
- volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
-
- old_freq = data->curr_freq;
-
- if (old_freq == freq)
- return 0;
-
- dev_dbg(dev, "targeting %lukHz %luuV\n", freq, volt);
-
- mutex_lock(&data->lock);
-
- if (data->disabled)
- goto out;
-
- if (freq > exynos5_int_opp_table[0].clk)
- pm_qos_update_request(&data->int_req, freq * 16 / 1000);
- else
- pm_qos_update_request(&data->int_req, -1);
-
- if (old_freq < freq)
- err = exynos5_int_setvolt(data, volt);
- if (err)
- goto out;
-
- err = clk_set_rate(data->int_clk, freq * 1000);
-
- if (err)
- goto out;
-
- if (old_freq > freq)
- err = exynos5_int_setvolt(data, volt);
- if (err)
- goto out;
-
- data->curr_freq = freq;
-out:
- mutex_unlock(&data->lock);
- return err;
-}
-
-static int exynos5_int_get_dev_status(struct device *dev,
- struct devfreq_dev_status *stat)
-{
- struct platform_device *pdev = container_of(dev, struct platform_device,
- dev);
- struct busfreq_data_int *data = platform_get_drvdata(pdev);
- struct busfreq_ppmu_data *ppmu_data = &data->ppmu_data;
- int busier_dmc;
-
- exynos_read_ppmu(ppmu_data);
- busier_dmc = exynos_get_busier_ppmu(ppmu_data);
-
- stat->current_frequency = data->curr_freq;
-
- /* Number of cycles spent on memory access */
- stat->busy_time = ppmu_data->ppmu[busier_dmc].count[PPMU_PMNCNT3];
- stat->busy_time *= 100 / INT_BUS_SATURATION_RATIO;
- stat->total_time = ppmu_data->ppmu[busier_dmc].ccnt;
-
- return 0;
-}
-
-static struct devfreq_dev_profile exynos5_devfreq_int_profile = {
- .initial_freq = 160000,
- .polling_ms = 100,
- .target = exynos5_busfreq_int_target,
- .get_dev_status = exynos5_int_get_dev_status,
-};
-
-static int exynos5250_init_int_tables(struct busfreq_data_int *data)
-{
- int i, err = 0;
-
- for (i = LV_0; i < _LV_END; i++) {
- err = dev_pm_opp_add(data->dev, exynos5_int_opp_table[i].clk,
- exynos5_int_opp_table[i].volt);
- if (err) {
- dev_err(data->dev, "Cannot add opp entries.\n");
- return err;
- }
- }
-
- return 0;
-}
-
-static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct busfreq_data_int *data = container_of(this,
- struct busfreq_data_int, pm_notifier);
- struct dev_pm_opp *opp;
- unsigned long maxfreq = ULONG_MAX;
- unsigned long freq;
- unsigned long volt;
- int err = 0;
-
- switch (event) {
- case PM_SUSPEND_PREPARE:
- /* Set Fastest and Deactivate DVFS */
- mutex_lock(&data->lock);
-
- data->disabled = true;
-
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- err = PTR_ERR(opp);
- goto unlock;
- }
- freq = dev_pm_opp_get_freq(opp);
- volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
-
- err = exynos5_int_setvolt(data, volt);
- if (err)
- goto unlock;
-
- err = clk_set_rate(data->int_clk, freq * 1000);
-
- if (err)
- goto unlock;
-
- data->curr_freq = freq;
-unlock:
- mutex_unlock(&data->lock);
- if (err)
- return NOTIFY_BAD;
- return NOTIFY_OK;
- case PM_POST_RESTORE:
- case PM_POST_SUSPEND:
- /* Reactivate */
- mutex_lock(&data->lock);
- data->disabled = false;
- mutex_unlock(&data->lock);
- return NOTIFY_OK;
- }
-
- return NOTIFY_DONE;
-}
-
-static int exynos5_busfreq_int_probe(struct platform_device *pdev)
-{
- struct busfreq_data_int *data;
- struct busfreq_ppmu_data *ppmu_data;
- struct dev_pm_opp *opp;
- struct device *dev = &pdev->dev;
- struct device_node *np;
- unsigned long initial_freq;
- unsigned long initial_volt;
- int err = 0;
- int i;
-
- data = devm_kzalloc(&pdev->dev, sizeof(struct busfreq_data_int),
- GFP_KERNEL);
- if (data == NULL) {
- dev_err(dev, "Cannot allocate memory.\n");
- return -ENOMEM;
- }
-
- ppmu_data = &data->ppmu_data;
- ppmu_data->ppmu_end = PPMU_END;
- ppmu_data->ppmu = devm_kzalloc(dev,
- sizeof(struct exynos_ppmu) * PPMU_END,
- GFP_KERNEL);
- if (!ppmu_data->ppmu) {
- dev_err(dev, "Failed to allocate memory for exynos_ppmu\n");
- return -ENOMEM;
- }
-
- np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-ppmu");
- if (np == NULL) {
- pr_err("Unable to find PPMU node\n");
- return -ENOENT;
- }
-
- for (i = 0; i < ppmu_data->ppmu_end; i++) {
- /* map PPMU memory region */
- ppmu_data->ppmu[i].hw_base = of_iomap(np, i);
- if (ppmu_data->ppmu[i].hw_base == NULL) {
- dev_err(&pdev->dev, "failed to map memory region\n");
- return -ENOMEM;
- }
- }
- data->pm_notifier.notifier_call = exynos5_busfreq_int_pm_notifier_event;
- data->dev = dev;
- mutex_init(&data->lock);
-
- err = exynos5250_init_int_tables(data);
- if (err)
- return err;
-
- data->vdd_int = devm_regulator_get(dev, "vdd_int");
- if (IS_ERR(data->vdd_int)) {
- dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
- return PTR_ERR(data->vdd_int);
- }
-
- data->int_clk = devm_clk_get(dev, "int_clk");
- if (IS_ERR(data->int_clk)) {
- dev_err(dev, "Cannot get clock \"int_clk\"\n");
- return PTR_ERR(data->int_clk);
- }
-
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_floor(dev,
- &exynos5_devfreq_int_profile.initial_freq);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- dev_err(dev, "Invalid initial frequency %lu kHz.\n",
- exynos5_devfreq_int_profile.initial_freq);
- return PTR_ERR(opp);
- }
- initial_freq = dev_pm_opp_get_freq(opp);
- initial_volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
- data->curr_freq = initial_freq;
-
- err = clk_set_rate(data->int_clk, initial_freq * 1000);
- if (err) {
- dev_err(dev, "Failed to set initial frequency\n");
- return err;
- }
-
- err = exynos5_int_setvolt(data, initial_volt);
- if (err)
- return err;
-
- platform_set_drvdata(pdev, data);
-
- busfreq_mon_reset(ppmu_data);
-
- data->devfreq = devm_devfreq_add_device(dev, &exynos5_devfreq_int_profile,
- "simple_ondemand", NULL);
- if (IS_ERR(data->devfreq))
- return PTR_ERR(data->devfreq);
-
- err = devm_devfreq_register_opp_notifier(dev, data->devfreq);
- if (err < 0) {
- dev_err(dev, "Failed to register opp notifier\n");
- return err;
- }
-
- err = register_pm_notifier(&data->pm_notifier);
- if (err) {
- dev_err(dev, "Failed to setup pm notifier\n");
- return err;
- }
-
- /* TODO: Add a new QOS class for int/mif bus */
- pm_qos_add_request(&data->int_req, PM_QOS_NETWORK_THROUGHPUT, -1);
-
- return 0;
-}
-
-static int exynos5_busfreq_int_remove(struct platform_device *pdev)
-{
- struct busfreq_data_int *data = platform_get_drvdata(pdev);
-
- pm_qos_remove_request(&data->int_req);
- unregister_pm_notifier(&data->pm_notifier);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int exynos5_busfreq_int_resume(struct device *dev)
-{
- struct platform_device *pdev = container_of(dev, struct platform_device,
- dev);
- struct busfreq_data_int *data = platform_get_drvdata(pdev);
- struct busfreq_ppmu_data *ppmu_data = &data->ppmu_data;
-
- busfreq_mon_reset(ppmu_data);
- return 0;
-}
-static const struct dev_pm_ops exynos5_busfreq_int_pm = {
- .resume = exynos5_busfreq_int_resume,
-};
-#endif
-static SIMPLE_DEV_PM_OPS(exynos5_busfreq_int_pm_ops, NULL,
- exynos5_busfreq_int_resume);
-
-/* platform device pointer for exynos5 devfreq device. */
-static struct platform_device *exynos5_devfreq_pdev;
-
-static struct platform_driver exynos5_busfreq_int_driver = {
- .probe = exynos5_busfreq_int_probe,
- .remove = exynos5_busfreq_int_remove,
- .driver = {
- .name = "exynos5-bus-int",
- .pm = &exynos5_busfreq_int_pm_ops,
- },
-};
-
-static int __init exynos5_busfreq_int_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&exynos5_busfreq_int_driver);
- if (ret < 0)
- goto out;
-
- exynos5_devfreq_pdev =
- platform_device_register_simple("exynos5-bus-int", -1, NULL, 0);
- if (IS_ERR(exynos5_devfreq_pdev)) {
- ret = PTR_ERR(exynos5_devfreq_pdev);
- goto out1;
- }
-
- return 0;
-out1:
- platform_driver_unregister(&exynos5_busfreq_int_driver);
-out:
- return ret;
-}
-late_initcall(exynos5_busfreq_int_init);
-
-static void __exit exynos5_busfreq_int_exit(void)
-{
- platform_device_unregister(exynos5_devfreq_pdev);
- platform_driver_unregister(&exynos5_busfreq_int_driver);
-}
-module_exit(exynos5_busfreq_int_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("EXYNOS5 busfreq driver with devfreq framework");
diff --git a/drivers/devfreq/exynos/exynos_ppmu.c b/drivers/devfreq/exynos/exynos_ppmu.c
deleted file mode 100644
index 97b75e513d29..000000000000
--- a/drivers/devfreq/exynos/exynos_ppmu.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * EXYNOS - PPMU support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/io.h>
-
-#include "exynos_ppmu.h"
-
-void exynos_ppmu_reset(void __iomem *ppmu_base)
-{
- __raw_writel(PPMU_CYCLE_RESET | PPMU_COUNTER_RESET, ppmu_base);
- __raw_writel(PPMU_ENABLE_CYCLE |
- PPMU_ENABLE_COUNT0 |
- PPMU_ENABLE_COUNT1 |
- PPMU_ENABLE_COUNT2 |
- PPMU_ENABLE_COUNT3,
- ppmu_base + PPMU_CNTENS);
-}
-
-void exynos_ppmu_setevent(void __iomem *ppmu_base, unsigned int ch,
- unsigned int evt)
-{
- __raw_writel(evt, ppmu_base + PPMU_BEVTSEL(ch));
-}
-
-void exynos_ppmu_start(void __iomem *ppmu_base)
-{
- __raw_writel(PPMU_ENABLE, ppmu_base);
-}
-
-void exynos_ppmu_stop(void __iomem *ppmu_base)
-{
- __raw_writel(PPMU_DISABLE, ppmu_base);
-}
-
-unsigned int exynos_ppmu_read(void __iomem *ppmu_base, unsigned int ch)
-{
- unsigned int total;
-
- if (ch == PPMU_PMNCNT3)
- total = ((__raw_readl(ppmu_base + PMCNT_OFFSET(ch)) << 8) |
- __raw_readl(ppmu_base + PMCNT_OFFSET(ch + 1)));
- else
- total = __raw_readl(ppmu_base + PMCNT_OFFSET(ch));
-
- return total;
-}
-
-void busfreq_mon_reset(struct busfreq_ppmu_data *ppmu_data)
-{
- unsigned int i;
-
- for (i = 0; i < ppmu_data->ppmu_end; i++) {
- void __iomem *ppmu_base = ppmu_data->ppmu[i].hw_base;
-
- /* Reset the performance and cycle counters */
- exynos_ppmu_reset(ppmu_base);
-
- /* Setup count registers to monitor read/write transactions */
- ppmu_data->ppmu[i].event[PPMU_PMNCNT3] = RDWR_DATA_COUNT;
- exynos_ppmu_setevent(ppmu_base, PPMU_PMNCNT3,
- ppmu_data->ppmu[i].event[PPMU_PMNCNT3]);
-
- exynos_ppmu_start(ppmu_base);
- }
-}
-EXPORT_SYMBOL(busfreq_mon_reset);
-
-void exynos_read_ppmu(struct busfreq_ppmu_data *ppmu_data)
-{
- int i, j;
-
- for (i = 0; i < ppmu_data->ppmu_end; i++) {
- void __iomem *ppmu_base = ppmu_data->ppmu[i].hw_base;
-
- exynos_ppmu_stop(ppmu_base);
-
- /* Update local data from PPMU */
- ppmu_data->ppmu[i].ccnt = __raw_readl(ppmu_base + PPMU_CCNT);
-
- for (j = PPMU_PMNCNT0; j < PPMU_PMNCNT_MAX; j++) {
- if (ppmu_data->ppmu[i].event[j] == 0)
- ppmu_data->ppmu[i].count[j] = 0;
- else
- ppmu_data->ppmu[i].count[j] =
- exynos_ppmu_read(ppmu_base, j);
- }
- }
-
- busfreq_mon_reset(ppmu_data);
-}
-EXPORT_SYMBOL(exynos_read_ppmu);
-
-int exynos_get_busier_ppmu(struct busfreq_ppmu_data *ppmu_data)
-{
- unsigned int count = 0;
- int i, j, busy = 0;
-
- for (i = 0; i < ppmu_data->ppmu_end; i++) {
- for (j = PPMU_PMNCNT0; j < PPMU_PMNCNT_MAX; j++) {
- if (ppmu_data->ppmu[i].count[j] > count) {
- count = ppmu_data->ppmu[i].count[j];
- busy = i;
- }
- }
- }
-
- return busy;
-}
-EXPORT_SYMBOL(exynos_get_busier_ppmu);
diff --git a/drivers/devfreq/exynos/exynos_ppmu.h b/drivers/devfreq/exynos/exynos_ppmu.h
deleted file mode 100644
index 71f17ba3563c..000000000000
--- a/drivers/devfreq/exynos/exynos_ppmu.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * EXYNOS PPMU header
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __DEVFREQ_EXYNOS_PPMU_H
-#define __DEVFREQ_EXYNOS_PPMU_H __FILE__
-
-#include <linux/ktime.h>
-
-/* For PPMU Control */
-#define PPMU_ENABLE BIT(0)
-#define PPMU_DISABLE 0x0
-#define PPMU_CYCLE_RESET BIT(1)
-#define PPMU_COUNTER_RESET BIT(2)
-
-#define PPMU_ENABLE_COUNT0 BIT(0)
-#define PPMU_ENABLE_COUNT1 BIT(1)
-#define PPMU_ENABLE_COUNT2 BIT(2)
-#define PPMU_ENABLE_COUNT3 BIT(3)
-#define PPMU_ENABLE_CYCLE BIT(31)
-
-#define PPMU_CNTENS 0x10
-#define PPMU_FLAG 0x50
-#define PPMU_CCNT_OVERFLOW BIT(31)
-#define PPMU_CCNT 0x100
-
-#define PPMU_PMCNT0 0x110
-#define PPMU_PMCNT_OFFSET 0x10
-#define PMCNT_OFFSET(x) (PPMU_PMCNT0 + (PPMU_PMCNT_OFFSET * x))
-
-#define PPMU_BEVT0SEL 0x1000
-#define PPMU_BEVTSEL_OFFSET 0x100
-#define PPMU_BEVTSEL(x) (PPMU_BEVT0SEL + (ch * PPMU_BEVTSEL_OFFSET))
-
-/* For Event Selection */
-#define RD_DATA_COUNT 0x5
-#define WR_DATA_COUNT 0x6
-#define RDWR_DATA_COUNT 0x7
-
-enum ppmu_counter {
- PPMU_PMNCNT0,
- PPMU_PMCCNT1,
- PPMU_PMNCNT2,
- PPMU_PMNCNT3,
- PPMU_PMNCNT_MAX,
-};
-
-struct bus_opp_table {
- unsigned int idx;
- unsigned long clk;
- unsigned long volt;
-};
-
-struct exynos_ppmu {
- void __iomem *hw_base;
- unsigned int ccnt;
- unsigned int event[PPMU_PMNCNT_MAX];
- unsigned int count[PPMU_PMNCNT_MAX];
- unsigned long long ns;
- ktime_t reset_time;
- bool ccnt_overflow;
- bool count_overflow[PPMU_PMNCNT_MAX];
-};
-
-struct busfreq_ppmu_data {
- struct exynos_ppmu *ppmu;
- int ppmu_end;
-};
-
-void exynos_ppmu_reset(void __iomem *ppmu_base);
-void exynos_ppmu_setevent(void __iomem *ppmu_base, unsigned int ch,
- unsigned int evt);
-void exynos_ppmu_start(void __iomem *ppmu_base);
-void exynos_ppmu_stop(void __iomem *ppmu_base);
-unsigned int exynos_ppmu_read(void __iomem *ppmu_base, unsigned int ch);
-void busfreq_mon_reset(struct busfreq_ppmu_data *ppmu_data);
-void exynos_read_ppmu(struct busfreq_ppmu_data *ppmu_data);
-int exynos_get_busier_ppmu(struct busfreq_ppmu_data *ppmu_data);
-#endif /* __DEVFREQ_EXYNOS_PPMU_H */
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
new file mode 100644
index 000000000000..9ef46e2592c4
--- /dev/null
+++ b/drivers/devfreq/governor_passive.c
@@ -0,0 +1,205 @@
+/*
+ * linux/drivers/devfreq/governor_passive.c
+ *
+ * Copyright (C) 2016 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/devfreq.h>
+#include "governor.h"
+
+static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
+ unsigned long *freq)
+{
+ struct devfreq_passive_data *p_data
+ = (struct devfreq_passive_data *)devfreq->data;
+ struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent;
+ unsigned long child_freq = ULONG_MAX;
+ struct dev_pm_opp *opp;
+ int i, count, ret = 0;
+
+ /*
+ * If the devfreq device using the passive governor provides its own
+ * method to determine the next frequency, use the get_target_freq()
+ * callback of struct devfreq_passive_data.
+ */
+ if (p_data->get_target_freq) {
+ ret = p_data->get_target_freq(devfreq, freq);
+ goto out;
+ }
+
+ /*
+ * If both the parent and the passive devfreq device use OPP tables,
+ * derive the next frequency from those tables.
+ */
+
+ /*
+ * - the parent devfreq device uses any governor except passive.
+ * - the passive devfreq device uses the passive governor.
+ *
+ * Each devfreq device has its own OPP table. After the parent's
+ * governor has decided the new frequency, the passive governor
+ * needs the index of that frequency in the parent's OPP table;
+ * the same index is then used to pick a suitable frequency for
+ * the passive devfreq device.
+ */
+ if (!devfreq->profile || !devfreq->profile->freq_table
+ || devfreq->profile->max_state <= 0)
+ return -EINVAL;
+
+ /*
+ * The passive governor has to get the correct frequency from the
+ * parent device's OPP list, because at this point *freq is only a
+ * temporary value decided by the parent's governor (e.g. ondemand).
+ */
+ rcu_read_lock();
+ opp = devfreq_recommended_opp(parent_devfreq->dev.parent, freq, 0);
+ rcu_read_unlock();
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto out;
+ }
+
+ /*
+ * Find the index in the parent device's OPP table of the frequency
+ * decided by its governor.
+ */
+ for (i = 0; i < parent_devfreq->profile->max_state; i++)
+ if (parent_devfreq->profile->freq_table[i] == *freq)
+ break;
+
+ if (i == parent_devfreq->profile->max_state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Get the suitable frequency by using index of parent device. */
+ if (i < devfreq->profile->max_state) {
+ child_freq = devfreq->profile->freq_table[i];
+ } else {
+ count = devfreq->profile->max_state;
+ child_freq = devfreq->profile->freq_table[count - 1];
+ }
+
+ /* Return the suitable frequency for passive device. */
+ *freq = child_freq;
+
+out:
+ return ret;
+}
+
+static int update_devfreq_passive(struct devfreq *devfreq, unsigned long freq)
+{
+ int ret;
+
+ if (!devfreq->governor)
+ return -EINVAL;
+
+ mutex_lock_nested(&devfreq->lock, SINGLE_DEPTH_NESTING);
+
+ ret = devfreq->governor->get_target_freq(devfreq, &freq);
+ if (ret < 0)
+ goto out;
+
+ ret = devfreq->profile->target(devfreq->dev.parent, &freq, 0);
+ if (ret < 0)
+ goto out;
+
+ devfreq->previous_freq = freq;
+
+out:
+ mutex_unlock(&devfreq->lock);
+
+ return 0;
+}
+
+static int devfreq_passive_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct devfreq_passive_data *data
+ = container_of(nb, struct devfreq_passive_data, nb);
+ struct devfreq *devfreq = (struct devfreq *)data->this;
+ struct devfreq *parent = (struct devfreq *)data->parent;
+ struct devfreq_freqs *freqs = (struct devfreq_freqs *)ptr;
+ unsigned long freq = freqs->new;
+
+ switch (event) {
+ case DEVFREQ_PRECHANGE:
+ if (parent->previous_freq > freq)
+ update_devfreq_passive(devfreq, freq);
+ break;
+ case DEVFREQ_POSTCHANGE:
+ if (parent->previous_freq < freq)
+ update_devfreq_passive(devfreq, freq);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int devfreq_passive_event_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+{
+ struct device *dev = devfreq->dev.parent;
+ struct devfreq_passive_data *p_data
+ = (struct devfreq_passive_data *)devfreq->data;
+ struct devfreq *parent = (struct devfreq *)p_data->parent;
+ struct notifier_block *nb = &p_data->nb;
+ int ret = 0;
+
+ if (!parent)
+ return -EPROBE_DEFER;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ if (!p_data->this)
+ p_data->this = devfreq;
+
+ nb->notifier_call = devfreq_passive_notifier_call;
+ ret = devm_devfreq_register_notifier(dev, parent, nb,
+ DEVFREQ_TRANSITION_NOTIFIER);
+ break;
+ case DEVFREQ_GOV_STOP:
+ devm_devfreq_unregister_notifier(dev, parent, nb,
+ DEVFREQ_TRANSITION_NOTIFIER);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static struct devfreq_governor devfreq_passive = {
+ .name = "passive",
+ .get_target_freq = devfreq_passive_get_target_freq,
+ .event_handler = devfreq_passive_event_handler,
+};
+
+static int __init devfreq_passive_init(void)
+{
+ return devfreq_add_governor(&devfreq_passive);
+}
+subsys_initcall(devfreq_passive_init);
+
+static void __exit devfreq_passive_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_passive);
+ if (ret)
+ pr_err("%s: failed to remove governor %d\n", __func__, ret);
+}
+module_exit(devfreq_passive_exit);
+
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_DESCRIPTION("DEVFREQ Passive governor");
+MODULE_LICENSE("GPL v2");
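
For reference, a minimal sketch of how a bus driver might register with the new governor, assuming struct devfreq_passive_data is declared by the companion <linux/devfreq.h> change and that the parent devfreq device, profile and target() body are provided elsewhere (all names below are illustrative, not part of this patch):

/*
 * Illustrative consumer of the "passive" governor.  The parent lookup,
 * profile and target() implementation are assumptions for this sketch.
 */
#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static struct devfreq *parent_devfreq;	/* set up by the parent bus driver */

static int child_bus_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* program the child bus clock/regulator for *freq here */
	return 0;
}

static struct devfreq_dev_profile child_bus_profile = {
	.target = child_bus_target,
	/* no polling or get_dev_status(): the parent's governor decides */
};

static struct devfreq_passive_data child_passive_data;

static int child_bus_probe(struct platform_device *pdev)
{
	struct devfreq *devfreq;

	child_passive_data.parent = parent_devfreq;
	devfreq = devm_devfreq_add_device(&pdev->dev, &child_bus_profile,
					  "passive", &child_passive_data);
	return PTR_ERR_OR_ZERO(devfreq);
}

On every parent transition, the notifier registered in devfreq_passive_event_handler() above calls update_devfreq_passive(), which maps the parent's new OPP index onto the child's freq_table.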
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 37755e63cc28..6ca7474baf4a 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -378,12 +378,11 @@ config EDAC_ALTERA
config EDAC_ALTERA_L2C
bool "Altera L2 Cache ECC"
- depends on EDAC_ALTERA=y
- select CACHE_L2X0
+ depends on EDAC_ALTERA=y && CACHE_L2X0
help
Support for error detection and correction on the
Altera L2 cache Memory for Altera SoCs. This option
- requires L2 cache so it will force that selection.
+ requires L2 cache.
config EDAC_ALTERA_OCRAM
bool "Altera On-Chip RAM ECC"
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 63e42098726d..5b4d223d6d68 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -78,27 +79,6 @@ static const struct altr_sdram_prv_data a10_data = {
.ue_set_mask = A10_DIAGINT_TDERRA_MASK,
};
-/************************** EDAC Device Defines **************************/
-
-/* OCRAM ECC Management Group Defines */
-#define ALTR_MAN_GRP_OCRAM_ECC_OFFSET 0x04
-#define ALTR_OCR_ECC_EN BIT(0)
-#define ALTR_OCR_ECC_INJS BIT(1)
-#define ALTR_OCR_ECC_INJD BIT(2)
-#define ALTR_OCR_ECC_SERR BIT(3)
-#define ALTR_OCR_ECC_DERR BIT(4)
-
-/* L2 ECC Management Group Defines */
-#define ALTR_MAN_GRP_L2_ECC_OFFSET 0x00
-#define ALTR_L2_ECC_EN BIT(0)
-#define ALTR_L2_ECC_INJS BIT(1)
-#define ALTR_L2_ECC_INJD BIT(2)
-
-#define ALTR_UE_TRIGGER_CHAR 'U' /* Trigger for UE */
-#define ALTR_TRIGGER_READ_WRD_CNT 32 /* Line size x 4 */
-#define ALTR_TRIG_OCRAM_BYTE_SIZE 128 /* Line size x 4 */
-#define ALTR_TRIG_L2C_BYTE_SIZE 4096 /* Full Page */
-
/*********************** EDAC Memory Controller Functions ****************/
/* The SDRAM controller uses the EDAC Memory Controller framework. */
@@ -252,8 +232,8 @@ static unsigned long get_total_mem(void)
}
static const struct of_device_id altr_sdram_ctrl_of_match[] = {
- { .compatible = "altr,sdram-edac", .data = (void *)&c5_data},
- { .compatible = "altr,sdram-edac-a10", .data = (void *)&a10_data},
+ { .compatible = "altr,sdram-edac", .data = &c5_data},
+ { .compatible = "altr,sdram-edac-a10", .data = &a10_data},
{},
};
MODULE_DEVICE_TABLE(of, altr_sdram_ctrl_of_match);
@@ -570,28 +550,8 @@ module_platform_driver(altr_edac_driver);
const struct edac_device_prv_data ocramecc_data;
const struct edac_device_prv_data l2ecc_data;
-
-struct edac_device_prv_data {
- int (*setup)(struct platform_device *pdev, void __iomem *base);
- int ce_clear_mask;
- int ue_clear_mask;
- char dbgfs_name[20];
- void * (*alloc_mem)(size_t size, void **other);
- void (*free_mem)(void *p, size_t size, void *other);
- int ecc_enable_mask;
- int ce_set_mask;
- int ue_set_mask;
- int trig_alloc_sz;
-};
-
-struct altr_edac_device_dev {
- void __iomem *base;
- int sb_irq;
- int db_irq;
- const struct edac_device_prv_data *data;
- struct dentry *debugfs_dir;
- char *edac_dev_name;
-};
+const struct edac_device_prv_data a10_ocramecc_data;
+const struct edac_device_prv_data a10_l2ecc_data;
static irqreturn_t altr_edac_device_handler(int irq, void *dev_id)
{
@@ -665,8 +625,9 @@ static ssize_t altr_edac_device_trig(struct file *file,
if (ACCESS_ONCE(ptemp[i]))
result = -1;
/* Toggle Error bit (it is latched), leave ECC enabled */
- writel(error_mask, drvdata->base);
- writel(priv->ecc_enable_mask, drvdata->base);
+ writel(error_mask, (drvdata->base + priv->set_err_ofst));
+ writel(priv->ecc_enable_mask, (drvdata->base +
+ priv->set_err_ofst));
ptemp[i] = i;
}
/* Ensure it has been written out */
@@ -694,6 +655,16 @@ static const struct file_operations altr_edac_device_inject_fops = {
.llseek = generic_file_llseek,
};
+static ssize_t altr_edac_a10_device_trig(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos);
+
+static const struct file_operations altr_edac_a10_device_inject_fops = {
+ .open = simple_open,
+ .write = altr_edac_a10_device_trig,
+ .llseek = generic_file_llseek,
+};
+
static void altr_create_edacdev_dbgfs(struct edac_device_ctl_info *edac_dci,
const struct edac_device_prv_data *priv)
{
@@ -708,17 +679,18 @@ static void altr_create_edacdev_dbgfs(struct edac_device_ctl_info *edac_dci,
if (!edac_debugfs_create_file(priv->dbgfs_name, S_IWUSR,
drvdata->debugfs_dir, edac_dci,
- &altr_edac_device_inject_fops))
+ priv->inject_fops))
debugfs_remove_recursive(drvdata->debugfs_dir);
}
static const struct of_device_id altr_edac_device_of_match[] = {
#ifdef CONFIG_EDAC_ALTERA_L2C
- { .compatible = "altr,socfpga-l2-ecc", .data = (void *)&l2ecc_data },
+ { .compatible = "altr,socfpga-l2-ecc", .data = &l2ecc_data },
+ { .compatible = "altr,socfpga-a10-l2-ecc", .data = &a10_l2ecc_data },
#endif
#ifdef CONFIG_EDAC_ALTERA_OCRAM
- { .compatible = "altr,socfpga-ocram-ecc",
- .data = (void *)&ocramecc_data },
+ { .compatible = "altr,socfpga-ocram-ecc", .data = &ocramecc_data },
+ { .compatible = "altr,socfpga-a10-ocram-ecc", .data = &a10_ocramecc_data },
#endif
{},
};
@@ -789,7 +761,7 @@ static int altr_edac_device_probe(struct platform_device *pdev)
/* Check specific dependencies for the module */
if (drvdata->data->setup) {
- res = drvdata->data->setup(pdev, drvdata->base);
+ res = drvdata->data->setup(drvdata);
if (res)
goto fail1;
}
@@ -856,6 +828,25 @@ module_platform_driver(altr_edac_device_driver);
/*********************** OCRAM EDAC Device Functions *********************/
#ifdef CONFIG_EDAC_ALTERA_OCRAM
+/*
+ * Test for the memory's ECC dependencies upon entry because the
+ * platform-specific startup should have initialized the memory and
+ * enabled the ECC. ECC can't be turned on here because accessing
+ * uninitialized memory would cause CE/UE errors, possibly causing
+ * an ABORT.
+ */
+static int altr_check_ecc_deps(struct altr_edac_device_dev *device)
+{
+ void __iomem *base = device->base;
+ const struct edac_device_prv_data *prv = device->data;
+
+ if (readl(base + prv->ecc_en_ofst) & prv->ecc_enable_mask)
+ return 0;
+
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s: No ECC present or ECC disabled.\n",
+ device->edac_dev_name);
+ return -ENODEV;
+}
static void *ocram_alloc_mem(size_t size, void **other)
{
@@ -891,36 +882,53 @@ static void ocram_free_mem(void *p, size_t size, void *other)
gen_pool_free((struct gen_pool *)other, (u32)p, size);
}
-/*
- * altr_ocram_check_deps()
- * Test for OCRAM cache ECC dependencies upon entry because
- * platform specific startup should have initialized the
- * On-Chip RAM memory and enabled the ECC.
- * Can't turn on ECC here because accessing un-initialized
- * memory will cause CE/UE errors possibly causing an ABORT.
- */
-static int altr_ocram_check_deps(struct platform_device *pdev,
- void __iomem *base)
+static irqreturn_t altr_edac_a10_ecc_irq(struct altr_edac_device_dev *dci,
+ bool sberr)
{
- if (readl(base) & ALTR_OCR_ECC_EN)
- return 0;
+ void __iomem *base = dci->base;
- edac_printk(KERN_ERR, EDAC_DEVICE,
- "OCRAM: No ECC present or ECC disabled.\n");
- return -ENODEV;
+ if (sberr) {
+ writel(ALTR_A10_ECC_SERRPENA,
+ base + ALTR_A10_ECC_INTSTAT_OFST);
+ edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name);
+ } else {
+ writel(ALTR_A10_ECC_DERRPENA,
+ base + ALTR_A10_ECC_INTSTAT_OFST);
+ edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name);
+ panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n");
+ }
+ return IRQ_HANDLED;
}
const struct edac_device_prv_data ocramecc_data = {
- .setup = altr_ocram_check_deps,
+ .setup = altr_check_ecc_deps,
.ce_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_SERR),
.ue_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_DERR),
.dbgfs_name = "altr_ocram_trigger",
.alloc_mem = ocram_alloc_mem,
.free_mem = ocram_free_mem,
.ecc_enable_mask = ALTR_OCR_ECC_EN,
+ .ecc_en_ofst = ALTR_OCR_ECC_REG_OFFSET,
.ce_set_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_INJS),
.ue_set_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_INJD),
+ .set_err_ofst = ALTR_OCR_ECC_REG_OFFSET,
.trig_alloc_sz = ALTR_TRIG_OCRAM_BYTE_SIZE,
+ .inject_fops = &altr_edac_device_inject_fops,
+};
+
+const struct edac_device_prv_data a10_ocramecc_data = {
+ .setup = altr_check_ecc_deps,
+ .ce_clear_mask = ALTR_A10_ECC_SERRPENA,
+ .ue_clear_mask = ALTR_A10_ECC_DERRPENA,
+ .irq_status_mask = A10_SYSMGR_ECC_INTSTAT_OCRAM,
+ .dbgfs_name = "altr_ocram_trigger",
+ .ecc_enable_mask = ALTR_A10_OCRAM_ECC_EN_CTL,
+ .ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
+ .ce_set_mask = ALTR_A10_ECC_TSERRA,
+ .ue_set_mask = ALTR_A10_ECC_TDERRA,
+ .set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
+ .ecc_irq_handler = altr_edac_a10_ecc_irq,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_OCRAM */
@@ -966,10 +974,13 @@ static void l2_free_mem(void *p, size_t size, void *other)
* Bail if ECC is not enabled.
* Note that L2 Cache Enable is forced at build time.
*/
-static int altr_l2_check_deps(struct platform_device *pdev,
- void __iomem *base)
+static int altr_l2_check_deps(struct altr_edac_device_dev *device)
{
- if (readl(base) & ALTR_L2_ECC_EN)
+ void __iomem *base = device->base;
+ const struct edac_device_prv_data *prv = device->data;
+
+ if ((readl(base) & prv->ecc_enable_mask) ==
+ prv->ecc_enable_mask)
return 0;
edac_printk(KERN_ERR, EDAC_DEVICE,
@@ -977,6 +988,24 @@ static int altr_l2_check_deps(struct platform_device *pdev,
return -ENODEV;
}
+static irqreturn_t altr_edac_a10_l2_irq(struct altr_edac_device_dev *dci,
+ bool sberr)
+{
+ if (sberr) {
+ regmap_write(dci->edac->ecc_mgr_map,
+ A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST,
+ A10_SYSGMR_MPU_CLEAR_L2_ECC_SB);
+ edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name);
+ } else {
+ regmap_write(dci->edac->ecc_mgr_map,
+ A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST,
+ A10_SYSGMR_MPU_CLEAR_L2_ECC_MB);
+ edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name);
+ panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n");
+ }
+ return IRQ_HANDLED;
+}
+
const struct edac_device_prv_data l2ecc_data = {
.setup = altr_l2_check_deps,
.ce_clear_mask = 0,
@@ -987,11 +1016,252 @@ const struct edac_device_prv_data l2ecc_data = {
.ecc_enable_mask = ALTR_L2_ECC_EN,
.ce_set_mask = (ALTR_L2_ECC_EN | ALTR_L2_ECC_INJS),
.ue_set_mask = (ALTR_L2_ECC_EN | ALTR_L2_ECC_INJD),
+ .set_err_ofst = ALTR_L2_ECC_REG_OFFSET,
+ .trig_alloc_sz = ALTR_TRIG_L2C_BYTE_SIZE,
+ .inject_fops = &altr_edac_device_inject_fops,
+};
+
+const struct edac_device_prv_data a10_l2ecc_data = {
+ .setup = altr_l2_check_deps,
+ .ce_clear_mask = ALTR_A10_L2_ECC_SERR_CLR,
+ .ue_clear_mask = ALTR_A10_L2_ECC_MERR_CLR,
+ .irq_status_mask = A10_SYSMGR_ECC_INTSTAT_L2,
+ .dbgfs_name = "altr_l2_trigger",
+ .alloc_mem = l2_alloc_mem,
+ .free_mem = l2_free_mem,
+ .ecc_enable_mask = ALTR_A10_L2_ECC_EN_CTL,
+ .ce_set_mask = ALTR_A10_L2_ECC_CE_INJ_MASK,
+ .ue_set_mask = ALTR_A10_L2_ECC_UE_INJ_MASK,
+ .set_err_ofst = ALTR_A10_L2_ECC_INJ_OFST,
+ .ecc_irq_handler = altr_edac_a10_l2_irq,
.trig_alloc_sz = ALTR_TRIG_L2C_BYTE_SIZE,
+ .inject_fops = &altr_edac_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_L2C */
+/********************* Arria10 EDAC Device Functions *************************/
+
+/*
+ * The Arria10 EDAC device functions differ from the Cyclone5/Arria5
+ * ones because two IRQs are shared among all the ECC peripherals. The
+ * ECC manager manages the IRQs and the child devices.
+ * Based on the xgene_edac.c peripheral code.
+ */
+
+static ssize_t altr_edac_a10_device_trig(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct edac_device_ctl_info *edac_dci = file->private_data;
+ struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
+ const struct edac_device_prv_data *priv = drvdata->data;
+ void __iomem *set_addr = (drvdata->base + priv->set_err_ofst);
+ unsigned long flags;
+ u8 trig_type;
+
+ if (!user_buf || get_user(trig_type, user_buf))
+ return -EFAULT;
+
+ local_irq_save(flags);
+ if (trig_type == ALTR_UE_TRIGGER_CHAR)
+ writel(priv->ue_set_mask, set_addr);
+ else
+ writel(priv->ce_set_mask, set_addr);
+ /* Ensure the interrupt test bits are set */
+ wmb();
+ local_irq_restore(flags);
+
+ return count;
+}
+
+static irqreturn_t altr_edac_a10_irq_handler(int irq, void *dev_id)
+{
+ irqreturn_t rc = IRQ_NONE;
+ struct altr_arria10_edac *edac = dev_id;
+ struct altr_edac_device_dev *dci;
+ int irq_status;
+ bool sberr = (irq == edac->sb_irq) ? 1 : 0;
+ int sm_offset = sberr ? A10_SYSMGR_ECC_INTSTAT_SERR_OFST :
+ A10_SYSMGR_ECC_INTSTAT_DERR_OFST;
+
+ regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
+
+ if ((irq != edac->sb_irq) && (irq != edac->db_irq)) {
+ WARN_ON(1);
+ } else {
+ list_for_each_entry(dci, &edac->a10_ecc_devices, next) {
+ if (irq_status & dci->data->irq_status_mask)
+ rc = dci->data->ecc_irq_handler(dci, sberr);
+ }
+ }
+
+ return rc;
+}
+
+static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
+ struct device_node *np)
+{
+ struct edac_device_ctl_info *dci;
+ struct altr_edac_device_dev *altdev;
+ char *ecc_name = (char *)np->name;
+ struct resource res;
+ int edac_idx;
+ int rc = 0;
+ const struct edac_device_prv_data *prv;
+ /* Get matching node and check for valid result */
+ const struct of_device_id *pdev_id =
+ of_match_node(altr_edac_device_of_match, np);
+ if (IS_ERR_OR_NULL(pdev_id))
+ return -ENODEV;
+
+ /* Get driver specific data for this EDAC device */
+ prv = pdev_id->data;
+ if (IS_ERR_OR_NULL(prv))
+ return -ENODEV;
+
+ if (!devres_open_group(edac->dev, altr_edac_a10_device_add, GFP_KERNEL))
+ return -ENOMEM;
+
+ rc = of_address_to_resource(np, 0, &res);
+ if (rc < 0) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s: no resource address\n", ecc_name);
+ goto err_release_group;
+ }
+
+ edac_idx = edac_device_alloc_index();
+ dci = edac_device_alloc_ctl_info(sizeof(*altdev), ecc_name,
+ 1, ecc_name, 1, 0, NULL, 0,
+ edac_idx);
+
+ if (!dci) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s: Unable to allocate EDAC device\n", ecc_name);
+ rc = -ENOMEM;
+ goto err_release_group;
+ }
+
+ altdev = dci->pvt_info;
+ dci->dev = edac->dev;
+ altdev->edac_dev_name = ecc_name;
+ altdev->edac_idx = edac_idx;
+ altdev->edac = edac;
+ altdev->edac_dev = dci;
+ altdev->data = prv;
+ altdev->ddev = *edac->dev;
+ dci->dev = &altdev->ddev;
+ dci->ctl_name = "Altera ECC Manager";
+ dci->mod_name = ecc_name;
+ dci->dev_name = ecc_name;
+
+ altdev->base = devm_ioremap_resource(edac->dev, &res);
+ if (IS_ERR(altdev->base)) {
+ rc = PTR_ERR(altdev->base);
+ goto err_release_group1;
+ }
+
+ /* Check specific dependencies for the module */
+ if (altdev->data->setup) {
+ rc = altdev->data->setup(altdev);
+ if (rc)
+ goto err_release_group1;
+ }
+
+ rc = edac_device_add_device(dci);
+ if (rc) {
+ dev_err(edac->dev, "edac_device_add_device failed\n");
+ rc = -ENOMEM;
+ goto err_release_group1;
+ }
+
+ altr_create_edacdev_dbgfs(dci, prv);
+
+ list_add(&altdev->next, &edac->a10_ecc_devices);
+
+ devres_remove_group(edac->dev, altr_edac_a10_device_add);
+
+ return 0;
+
+err_release_group1:
+ edac_device_free_ctl_info(dci);
+err_release_group:
+ edac_printk(KERN_ALERT, EDAC_DEVICE, "%s: %d\n", __func__, __LINE__);
+ devres_release_group(edac->dev, NULL);
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s:Error setting up EDAC device: %d\n", ecc_name, rc);
+
+ return rc;
+}
+
+static int altr_edac_a10_probe(struct platform_device *pdev)
+{
+ struct altr_arria10_edac *edac;
+ struct device_node *child;
+ int rc;
+
+ edac = devm_kzalloc(&pdev->dev, sizeof(*edac), GFP_KERNEL);
+ if (!edac)
+ return -ENOMEM;
+
+ edac->dev = &pdev->dev;
+ platform_set_drvdata(pdev, edac);
+ INIT_LIST_HEAD(&edac->a10_ecc_devices);
+
+ edac->ecc_mgr_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "altr,sysmgr-syscon");
+ if (IS_ERR(edac->ecc_mgr_map)) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to get syscon altr,sysmgr-syscon\n");
+ return PTR_ERR(edac->ecc_mgr_map);
+ }
+
+ edac->sb_irq = platform_get_irq(pdev, 0);
+ rc = devm_request_irq(&pdev->dev, edac->sb_irq,
+ altr_edac_a10_irq_handler,
+ IRQF_SHARED, dev_name(&pdev->dev), edac);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "No SBERR IRQ resource\n");
+ return rc;
+ }
+
+ edac->db_irq = platform_get_irq(pdev, 1);
+ rc = devm_request_irq(&pdev->dev, edac->db_irq,
+ altr_edac_a10_irq_handler,
+ IRQF_SHARED, dev_name(&pdev->dev), edac);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n");
+ return rc;
+ }
+
+ for_each_child_of_node(pdev->dev.of_node, child) {
+ if (!of_device_is_available(child))
+ continue;
+ if (of_device_is_compatible(child, "altr,socfpga-a10-l2-ecc"))
+ altr_edac_a10_device_add(edac, child);
+ else if (of_device_is_compatible(child,
+ "altr,socfpga-a10-ocram-ecc"))
+ altr_edac_a10_device_add(edac, child);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id altr_edac_a10_of_match[] = {
+ { .compatible = "altr,socfpga-a10-ecc-manager" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altr_edac_a10_of_match);
+
+static struct platform_driver altr_edac_a10_driver = {
+ .probe = altr_edac_a10_probe,
+ .driver = {
+ .name = "socfpga_a10_ecc_manager",
+ .of_match_table = altr_edac_a10_of_match,
+ },
+};
+module_platform_driver(altr_edac_a10_driver);
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Thor Thayer");
MODULE_DESCRIPTION("EDAC Driver for Altera Memories");
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 953077d3e4f3..42090f36ba6e 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -195,4 +195,132 @@ struct altr_sdram_mc_data {
const struct altr_sdram_prv_data *data;
};
+/************************** EDAC Device Defines **************************/
+/***** General Device Trigger Defines *****/
+#define ALTR_UE_TRIGGER_CHAR 'U' /* Trigger for UE */
+#define ALTR_TRIGGER_READ_WRD_CNT 32 /* Line size x 4 */
+#define ALTR_TRIG_OCRAM_BYTE_SIZE 128 /* Line size x 4 */
+#define ALTR_TRIG_L2C_BYTE_SIZE 4096 /* Full Page */
+
+/******* Cyclone5 and Arria5 Defines *******/
+/* OCRAM ECC Management Group Defines */
+#define ALTR_MAN_GRP_OCRAM_ECC_OFFSET 0x04
+#define ALTR_OCR_ECC_REG_OFFSET 0x00
+#define ALTR_OCR_ECC_EN BIT(0)
+#define ALTR_OCR_ECC_INJS BIT(1)
+#define ALTR_OCR_ECC_INJD BIT(2)
+#define ALTR_OCR_ECC_SERR BIT(3)
+#define ALTR_OCR_ECC_DERR BIT(4)
+
+/* L2 ECC Management Group Defines */
+#define ALTR_MAN_GRP_L2_ECC_OFFSET 0x00
+#define ALTR_L2_ECC_REG_OFFSET 0x00
+#define ALTR_L2_ECC_EN BIT(0)
+#define ALTR_L2_ECC_INJS BIT(1)
+#define ALTR_L2_ECC_INJD BIT(2)
+
+/* Arria10 General ECC Block Module Defines */
+#define ALTR_A10_ECC_CTRL_OFST 0x08
+#define ALTR_A10_ECC_EN BIT(0)
+#define ALTR_A10_ECC_INITA BIT(16)
+#define ALTR_A10_ECC_INITB BIT(24)
+
+#define ALTR_A10_ECC_INITSTAT_OFST 0x0C
+#define ALTR_A10_ECC_INITCOMPLETEA BIT(0)
+#define ALTR_A10_ECC_INITCOMPLETEB BIT(8)
+
+#define ALTR_A10_ECC_ERRINTEN_OFST 0x10
+#define ALTR_A10_ECC_SERRINTEN BIT(0)
+
+#define ALTR_A10_ECC_INTSTAT_OFST 0x20
+#define ALTR_A10_ECC_SERRPENA BIT(0)
+#define ALTR_A10_ECC_DERRPENA BIT(8)
+#define ALTR_A10_ECC_ERRPENA_MASK (ALTR_A10_ECC_SERRPENA | \
+ ALTR_A10_ECC_DERRPENA)
+#define ALTR_A10_ECC_SERRPENB BIT(16)
+#define ALTR_A10_ECC_DERRPENB BIT(24)
+#define ALTR_A10_ECC_ERRPENB_MASK (ALTR_A10_ECC_SERRPENB | \
+ ALTR_A10_ECC_DERRPENB)
+
+#define ALTR_A10_ECC_INTTEST_OFST 0x24
+#define ALTR_A10_ECC_TSERRA BIT(0)
+#define ALTR_A10_ECC_TDERRA BIT(8)
+
+/* ECC Manager Defines */
+#define A10_SYSMGR_ECC_INTMASK_SET_OFST 0x94
+#define A10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98
+#define A10_SYSMGR_ECC_INTMASK_OCRAM BIT(1)
+
+#define A10_SYSMGR_ECC_INTSTAT_SERR_OFST 0x9C
+#define A10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
+#define A10_SYSMGR_ECC_INTSTAT_L2 BIT(0)
+#define A10_SYSMGR_ECC_INTSTAT_OCRAM BIT(1)
+
+#define A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST 0xA8
+#define A10_SYSGMR_MPU_CLEAR_L2_ECC_SB BIT(15)
+#define A10_SYSGMR_MPU_CLEAR_L2_ECC_MB BIT(31)
+
+/* Arria 10 L2 ECC Management Group Defines */
+#define ALTR_A10_L2_ECC_CTL_OFST 0x0
+#define ALTR_A10_L2_ECC_EN_CTL BIT(0)
+
+#define ALTR_A10_L2_ECC_STATUS 0xFFD060A4
+#define ALTR_A10_L2_ECC_STAT_OFST 0xA4
+#define ALTR_A10_L2_ECC_SERR_PEND BIT(0)
+#define ALTR_A10_L2_ECC_MERR_PEND BIT(0)
+
+#define ALTR_A10_L2_ECC_CLR_OFST 0x4
+#define ALTR_A10_L2_ECC_SERR_CLR BIT(15)
+#define ALTR_A10_L2_ECC_MERR_CLR BIT(31)
+
+#define ALTR_A10_L2_ECC_INJ_OFST ALTR_A10_L2_ECC_CTL_OFST
+#define ALTR_A10_L2_ECC_CE_INJ_MASK 0x00000101
+#define ALTR_A10_L2_ECC_UE_INJ_MASK 0x00010101
+
+/* Arria 10 OCRAM ECC Management Group Defines */
+#define ALTR_A10_OCRAM_ECC_EN_CTL (BIT(1) | BIT(0))
+
+struct altr_edac_device_dev;
+
+struct edac_device_prv_data {
+ int (*setup)(struct altr_edac_device_dev *device);
+ int ce_clear_mask;
+ int ue_clear_mask;
+ int irq_status_mask;
+ char dbgfs_name[20];
+ void * (*alloc_mem)(size_t size, void **other);
+ void (*free_mem)(void *p, size_t size, void *other);
+ int ecc_enable_mask;
+ int ecc_en_ofst;
+ int ce_set_mask;
+ int ue_set_mask;
+ int set_err_ofst;
+ irqreturn_t (*ecc_irq_handler)(struct altr_edac_device_dev *dci,
+ bool sb);
+ int trig_alloc_sz;
+ const struct file_operations *inject_fops;
+};
+
+struct altr_edac_device_dev {
+ struct list_head next;
+ void __iomem *base;
+ int sb_irq;
+ int db_irq;
+ const struct edac_device_prv_data *data;
+ struct dentry *debugfs_dir;
+ char *edac_dev_name;
+ struct altr_arria10_edac *edac;
+ struct edac_device_ctl_info *edac_dev;
+ struct device ddev;
+ int edac_idx;
+};
+
+struct altr_arria10_edac {
+ struct device *dev;
+ struct regmap *ecc_mgr_map;
+ int sb_irq;
+ int db_irq;
+ struct list_head a10_ecc_devices;
+};
+
#endif /* #ifndef _ALTERA_EDAC_H */
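
To illustrate how the expanded edac_device_prv_data ties the Arria10 helpers together, a hypothetical entry for an additional Arria10 ECC block could look as follows; every offset, mask and name here is a placeholder rather than real hardware data, and the callbacks are the ones added in altera_edac.c above:

/*
 * Hypothetical Arria10 peripheral ECC description (illustration only).
 * The irq_status_mask bit and debugfs name are placeholders.
 */
static const struct edac_device_prv_data a10_example_ecc_data = {
	.setup		 = altr_check_ecc_deps,
	.ce_clear_mask	 = ALTR_A10_ECC_SERRPENA,
	.ue_clear_mask	 = ALTR_A10_ECC_DERRPENA,
	.irq_status_mask = BIT(2),		/* placeholder sysmgr status bit */
	.dbgfs_name	 = "altr_demo_trigger",
	.ecc_enable_mask = ALTR_A10_ECC_EN,
	.ecc_en_ofst	 = ALTR_A10_ECC_CTRL_OFST,
	.ce_set_mask	 = ALTR_A10_ECC_TSERRA,
	.ue_set_mask	 = ALTR_A10_ECC_TDERRA,
	.set_err_ofst	 = ALTR_A10_ECC_INTTEST_OFST,
	.ecc_irq_handler = altr_edac_a10_ecc_irq,
	.inject_fops	 = &altr_edac_a10_device_inject_fops,
};

The ECC manager's shared interrupt handler then only needs the matching bit set in the sysmgr SERR/DERR status registers to dispatch to ecc_irq_handler() for such a block.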
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index d87a47547ba5..624e2f78339c 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -15,11 +15,6 @@ module_param(ecc_enable_override, int, 0644);
static struct msr __percpu *msrs;
-/*
- * count successfully initialized driver instances for setup_pci_device()
- */
-static atomic_t drv_instances = ATOMIC_INIT(0);
-
/* Per-node stuff */
static struct ecc_settings **ecc_stngs;
@@ -1918,7 +1913,7 @@ static struct amd64_family_type family_types[] = {
[K8_CPUS] = {
.ctl_name = "K8",
.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
- .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
+ .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
.ops = {
.early_channel_count = k8_early_channel_count,
.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
@@ -1928,7 +1923,7 @@ static struct amd64_family_type family_types[] = {
[F10_CPUS] = {
.ctl_name = "F10h",
.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
- .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
+ .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -1938,7 +1933,7 @@ static struct amd64_family_type family_types[] = {
[F15_CPUS] = {
.ctl_name = "F15h",
.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
- .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
+ .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -1948,7 +1943,7 @@ static struct amd64_family_type family_types[] = {
[F15_M30H_CPUS] = {
.ctl_name = "F15h_M30h",
.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
- .f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3,
+ .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -1958,7 +1953,7 @@ static struct amd64_family_type family_types[] = {
[F15_M60H_CPUS] = {
.ctl_name = "F15h_M60h",
.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
- .f3_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F3,
+ .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -1968,7 +1963,7 @@ static struct amd64_family_type family_types[] = {
[F16_CPUS] = {
.ctl_name = "F16h",
.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
- .f3_id = PCI_DEVICE_ID_AMD_16H_NB_F3,
+ .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -1978,7 +1973,7 @@ static struct amd64_family_type family_types[] = {
[F16_M30H_CPUS] = {
.ctl_name = "F16h_M30h",
.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
- .f3_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F3,
+ .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -2227,13 +2222,13 @@ static inline void decode_bus_error(int node_id, struct mce *m)
}
/*
- * Use pvt->F2 which contains the F2 CPU PCI device to get the related
- * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
+ * Use pvt->F3 which contains the F3 CPU PCI device to get the related
+ * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
*/
-static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
+static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f2_id)
{
/* Reserve the ADDRESS MAP Device */
- pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
+ pvt->F1 = pci_get_related_function(pvt->F3->vendor, f1_id, pvt->F3);
if (!pvt->F1) {
amd64_err("error address map device not found: "
"vendor %x device 0x%x (broken BIOS?)\n",
@@ -2241,15 +2236,15 @@ static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
return -ENODEV;
}
- /* Reserve the MISC Device */
- pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
- if (!pvt->F3) {
+ /* Reserve the DCT Device */
+ pvt->F2 = pci_get_related_function(pvt->F3->vendor, f2_id, pvt->F3);
+ if (!pvt->F2) {
pci_dev_put(pvt->F1);
pvt->F1 = NULL;
- amd64_err("error F3 device not found: "
+ amd64_err("error F2 device not found: "
"vendor %x device 0x%x (broken BIOS?)\n",
- PCI_VENDOR_ID_AMD, f3_id);
+ PCI_VENDOR_ID_AMD, f2_id);
return -ENODEV;
}
@@ -2263,7 +2258,7 @@ static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
pci_dev_put(pvt->F1);
- pci_dev_put(pvt->F3);
+ pci_dev_put(pvt->F2);
}
/*
@@ -2778,14 +2773,14 @@ static const struct attribute_group *amd64_edac_attr_groups[] = {
NULL
};
-static int init_one_instance(struct pci_dev *F2)
+static int init_one_instance(unsigned int nid)
{
- struct amd64_pvt *pvt = NULL;
+ struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct amd64_family_type *fam_type = NULL;
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
+ struct amd64_pvt *pvt = NULL;
int err = 0, ret;
- u16 nid = amd_pci_dev_to_node_id(F2);
ret = -ENOMEM;
pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
@@ -2793,7 +2788,7 @@ static int init_one_instance(struct pci_dev *F2)
goto err_ret;
pvt->mc_node_id = nid;
- pvt->F2 = F2;
+ pvt->F3 = F3;
ret = -EINVAL;
fam_type = per_family_init(pvt);
@@ -2801,7 +2796,7 @@ static int init_one_instance(struct pci_dev *F2)
goto err_free;
ret = -ENODEV;
- err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
+ err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f2_id);
if (err)
goto err_free;
@@ -2836,7 +2831,7 @@ static int init_one_instance(struct pci_dev *F2)
goto err_siblings;
mci->pvt_info = pvt;
- mci->pdev = &pvt->F2->dev;
+ mci->pdev = &pvt->F3->dev;
setup_mci_misc_attrs(mci, fam_type);
@@ -2855,8 +2850,6 @@ static int init_one_instance(struct pci_dev *F2)
amd_register_ecc_decoder(decode_bus_error);
- atomic_inc(&drv_instances);
-
return 0;
err_add_mc:
@@ -2872,19 +2865,11 @@ err_ret:
return ret;
}
-static int probe_one_instance(struct pci_dev *pdev,
- const struct pci_device_id *mc_type)
+static int probe_one_instance(unsigned int nid)
{
- u16 nid = amd_pci_dev_to_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s;
- int ret = 0;
-
- ret = pci_enable_device(pdev);
- if (ret < 0) {
- edac_dbg(0, "ret=%d\n", ret);
- return -EIO;
- }
+ int ret;
ret = -ENOMEM;
s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
@@ -2905,7 +2890,7 @@ static int probe_one_instance(struct pci_dev *pdev,
goto err_enable;
}
- ret = init_one_instance(pdev);
+ ret = init_one_instance(nid);
if (ret < 0) {
amd64_err("Error probing instance: %d\n", nid);
restore_ecc_error_reporting(s, nid, F3);
@@ -2921,19 +2906,18 @@ err_out:
return ret;
}
-static void remove_one_instance(struct pci_dev *pdev)
+static void remove_one_instance(unsigned int nid)
{
- struct mem_ctl_info *mci;
- struct amd64_pvt *pvt;
- u16 nid = amd_pci_dev_to_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s = ecc_stngs[nid];
+ struct mem_ctl_info *mci;
+ struct amd64_pvt *pvt;
- mci = find_mci_by_dev(&pdev->dev);
+ mci = find_mci_by_dev(&F3->dev);
WARN_ON(!mci);
/* Remove from EDAC CORE tracking list */
- mci = edac_mc_del_mc(&pdev->dev);
+ mci = edac_mc_del_mc(&F3->dev);
if (!mci)
return;
@@ -2957,31 +2941,6 @@ static void remove_one_instance(struct pci_dev *pdev)
edac_mc_free(mci);
}
-/*
- * This table is part of the interface for loading drivers for PCI devices. The
- * PCI core identifies what devices are on a system during boot, and then
- * inquiry this table to see if this driver is for a given device found.
- */
-static const struct pci_device_id amd64_pci_table[] = {
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F2) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F2) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F2) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F2) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F2) },
- {0, }
-};
-MODULE_DEVICE_TABLE(pci, amd64_pci_table);
-
-static struct pci_driver amd64_pci_driver = {
- .name = EDAC_MOD_STR,
- .probe = probe_one_instance,
- .remove = remove_one_instance,
- .id_table = amd64_pci_table,
- .driver.probe_type = PROBE_FORCE_SYNCHRONOUS,
-};
-
static void setup_pci_device(void)
{
struct mem_ctl_info *mci;
@@ -3005,8 +2964,7 @@ static void setup_pci_device(void)
static int __init amd64_edac_init(void)
{
int err = -ENODEV;
-
- printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
+ int i;
opstate_init();
@@ -3022,13 +2980,14 @@ static int __init amd64_edac_init(void)
if (!msrs)
goto err_free;
- err = pci_register_driver(&amd64_pci_driver);
- if (err)
- goto err_pci;
+ for (i = 0; i < amd_nb_num(); i++)
+ if (probe_one_instance(i)) {
+ /* unwind properly */
+ while (--i >= 0)
+ remove_one_instance(i);
- err = -ENODEV;
- if (!atomic_read(&drv_instances))
- goto err_no_instances;
+ goto err_pci;
+ }
setup_pci_device();
@@ -3036,10 +2995,9 @@ static int __init amd64_edac_init(void)
amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif
- return 0;
+ printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
-err_no_instances:
- pci_unregister_driver(&amd64_pci_driver);
+ return 0;
err_pci:
msrs_free(msrs);
@@ -3055,10 +3013,13 @@ err_ret:
static void __exit amd64_edac_exit(void)
{
+ int i;
+
if (pci_ctl)
edac_pci_release_generic_ctl(pci_ctl);
- pci_unregister_driver(&amd64_pci_driver);
+ for (i = 0; i < amd_nb_num(); i++)
+ remove_one_instance(i);
kfree(ecc_stngs);
ecc_stngs = NULL;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index c0f248f3aaf9..c08870479054 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -422,7 +422,7 @@ struct low_ops {
struct amd64_family_type {
const char *ctl_name;
- u16 f1_id, f3_id;
+ u16 f1_id, f2_id;
struct low_ops ops;
};
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 1472f48c8ac6..6aa256b0a1ed 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -923,7 +923,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci,
mci->ue_mc += count;
if (!enable_per_layer_report) {
- mci->ce_noinfo_count += count;
+ mci->ue_noinfo_count += count;
return;
}
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 26e65ab5932a..10c305b4a2e1 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -998,11 +998,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
+ struct bus_type *bus = mci->bus;
const char *name = mci->bus->name;
edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
device_unregister(&mci->dev);
- bus_unregister(mci->bus);
+ bus_unregister(bus);
kfree(name);
}
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 792bdae2b91d..8a68a5e943ea 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -271,16 +271,6 @@ struct i7core_pvt {
bool is_registered, enable_scrub;
- /* Fifo double buffers */
- struct mce mce_entry[MCE_LOG_LEN];
- struct mce mce_outentry[MCE_LOG_LEN];
-
- /* Fifo in/out counters */
- unsigned mce_in, mce_out;
-
- /* Count indicator to show errors not got */
- unsigned mce_overrun;
-
/* DCLK Frequency used for computing scrub rate */
int dclk_freq;
@@ -1792,56 +1782,15 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
* i7core_check_error Retrieve and process errors reported by the
* hardware. Called by the Core module.
*/
-static void i7core_check_error(struct mem_ctl_info *mci)
+static void i7core_check_error(struct mem_ctl_info *mci, struct mce *m)
{
struct i7core_pvt *pvt = mci->pvt_info;
- int i;
- unsigned count = 0;
- struct mce *m;
- /*
- * MCE first step: Copy all mce errors into a temporary buffer
- * We use a double buffering here, to reduce the risk of
- * losing an error.
- */
- smp_rmb();
- count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
- % MCE_LOG_LEN;
- if (!count)
- goto check_ce_error;
-
- m = pvt->mce_outentry;
- if (pvt->mce_in + count > MCE_LOG_LEN) {
- unsigned l = MCE_LOG_LEN - pvt->mce_in;
-
- memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
- smp_wmb();
- pvt->mce_in = 0;
- count -= l;
- m += l;
- }
- memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
- smp_wmb();
- pvt->mce_in += count;
-
- smp_rmb();
- if (pvt->mce_overrun) {
- i7core_printk(KERN_ERR, "Lost %d memory errors\n",
- pvt->mce_overrun);
- smp_wmb();
- pvt->mce_overrun = 0;
- }
-
- /*
- * MCE second step: parse errors and display
- */
- for (i = 0; i < count; i++)
- i7core_mce_output_error(mci, &pvt->mce_outentry[i]);
+ i7core_mce_output_error(mci, m);
/*
* Now, let's increment CE error counts
*/
-check_ce_error:
if (!pvt->is_registered)
i7core_udimm_check_mc_ecc_err(mci);
else
@@ -1849,12 +1798,8 @@ check_ce_error:
}
/*
- * i7core_mce_check_error Replicates mcelog routine to get errors
- * This routine simply queues mcelog errors, and
- * return. The error itself should be handled later
- * by i7core_check_error.
- * WARNING: As this routine should be called at NMI time, extra care should
- * be taken to avoid deadlocks, and to be as fast as possible.
+ * Check that logging is enabled and that this is the right type
+ * of error for us to handle.
*/
static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
void *data)
@@ -1882,21 +1827,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
if (mce->bank != 8)
return NOTIFY_DONE;
- smp_rmb();
- if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
- smp_wmb();
- pvt->mce_overrun++;
- return NOTIFY_DONE;
- }
-
- /* Copy memory error at the ringbuffer */
- memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
- smp_wmb();
- pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
-
- /* Handle fatal errors immediately */
- if (mce->mcgstatus & 1)
- i7core_check_error(mci);
+ i7core_check_error(mci, mce);
/* Advise mcelog that the errors were handled */
return NOTIFY_STOP;
@@ -2243,8 +2174,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
get_dimm_config(mci);
/* record ptr to the generic device */
mci->pdev = &i7core_dev->pdev[0]->dev;
- /* Set the function pointer to an actual operation function */
- mci->edac_check = i7core_check_error;
/* Enable scrubrate setting */
if (pvt->enable_scrub)
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 18d77ace4813..1c88d9707495 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -17,6 +17,7 @@
* 015c: Xeon E3-1200 v2/3rd Gen Core processor DRAM Controller
* 0c04: Xeon E3-1200 v3/4th Gen Core Processor DRAM Controller
* 0c08: Xeon E3-1200 v3 Processor DRAM Controller
+ * 1918: Xeon E3-1200 v5 Skylake Host Bridge/DRAM Registers
*
* Based on Intel specification:
* http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf
@@ -55,6 +56,7 @@
#define PCI_DEVICE_ID_INTEL_IE31200_HB_5 0x015c
#define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04
#define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x1918
#define IE31200_DIMMS 4
#define IE31200_RANKS 8
@@ -105,8 +107,11 @@
* 1 Multiple Bit Error Status (MERRSTS)
* 0 Correctable Error Status (CERRSTS)
*/
+
#define IE31200_C0ECCERRLOG 0x40c8
#define IE31200_C1ECCERRLOG 0x44c8
+#define IE31200_C0ECCERRLOG_SKL 0x4048
+#define IE31200_C1ECCERRLOG_SKL 0x4448
#define IE31200_ECCERRLOG_CE BIT(0)
#define IE31200_ECCERRLOG_UE BIT(1)
#define IE31200_ECCERRLOG_RANK_BITS GENMASK_ULL(28, 27)
@@ -123,17 +128,28 @@
#define IE31200_CAPID0_DDPCD BIT(6)
#define IE31200_CAPID0_ECC BIT(1)
-#define IE31200_MAD_DIMM_0_OFFSET 0x5004
-#define IE31200_MAD_DIMM_SIZE GENMASK_ULL(7, 0)
-#define IE31200_MAD_DIMM_A_RANK BIT(17)
-#define IE31200_MAD_DIMM_A_WIDTH BIT(19)
-
-#define IE31200_PAGES(n) (n << (28 - PAGE_SHIFT))
+#define IE31200_MAD_DIMM_0_OFFSET 0x5004
+#define IE31200_MAD_DIMM_0_OFFSET_SKL 0x500C
+#define IE31200_MAD_DIMM_SIZE GENMASK_ULL(7, 0)
+#define IE31200_MAD_DIMM_A_RANK BIT(17)
+#define IE31200_MAD_DIMM_A_RANK_SHIFT 17
+#define IE31200_MAD_DIMM_A_RANK_SKL BIT(10)
+#define IE31200_MAD_DIMM_A_RANK_SKL_SHIFT 10
+#define IE31200_MAD_DIMM_A_WIDTH BIT(19)
+#define IE31200_MAD_DIMM_A_WIDTH_SHIFT 19
+#define IE31200_MAD_DIMM_A_WIDTH_SKL GENMASK_ULL(9, 8)
+#define IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT 8
+
+/* Skylake reports 1GB increments, everything else is 256MB */
+#define IE31200_PAGES(n, skl) \
+ (n << (28 + (2 * skl) - PAGE_SHIFT))
static int nr_channels;
struct ie31200_priv {
void __iomem *window;
+ void __iomem *c0errlog;
+ void __iomem *c1errlog;
};
enum ie31200_chips {
@@ -157,9 +173,9 @@ static const struct ie31200_dev_info ie31200_devs[] = {
};
struct dimm_data {
- u8 size; /* in 256MB multiples */
+ u8 size; /* in multiples of 256MB, except Skylake is 1GB */
u8 dual_rank : 1,
- x16_width : 1; /* 0 means x8 width */
+ x16_width : 2; /* 0 means x8 width */
};
static int how_many_channels(struct pci_dev *pdev)
@@ -197,11 +213,10 @@ static bool ecc_capable(struct pci_dev *pdev)
return true;
}
-static int eccerrlog_row(int channel, u64 log)
+static int eccerrlog_row(u64 log)
{
- int rank = ((log & IE31200_ECCERRLOG_RANK_BITS) >>
- IE31200_ECCERRLOG_RANK_SHIFT);
- return rank | (channel * IE31200_RANKS_PER_CHANNEL);
+ return ((log & IE31200_ECCERRLOG_RANK_BITS) >>
+ IE31200_ECCERRLOG_RANK_SHIFT);
}
static void ie31200_clear_error_info(struct mem_ctl_info *mci)
@@ -219,7 +234,6 @@ static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
{
struct pci_dev *pdev;
struct ie31200_priv *priv = mci->pvt_info;
- void __iomem *window = priv->window;
pdev = to_pci_dev(mci->pdev);
@@ -232,9 +246,9 @@ static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
if (!(info->errsts & IE31200_ERRSTS_BITS))
return;
- info->eccerrlog[0] = lo_hi_readq(window + IE31200_C0ECCERRLOG);
+ info->eccerrlog[0] = lo_hi_readq(priv->c0errlog);
if (nr_channels == 2)
- info->eccerrlog[1] = lo_hi_readq(window + IE31200_C1ECCERRLOG);
+ info->eccerrlog[1] = lo_hi_readq(priv->c1errlog);
pci_read_config_word(pdev, IE31200_ERRSTS, &info->errsts2);
@@ -245,10 +259,10 @@ static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
* should be UE info.
*/
if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
- info->eccerrlog[0] = lo_hi_readq(window + IE31200_C0ECCERRLOG);
+ info->eccerrlog[0] = lo_hi_readq(priv->c0errlog);
if (nr_channels == 2)
info->eccerrlog[1] =
- lo_hi_readq(window + IE31200_C1ECCERRLOG);
+ lo_hi_readq(priv->c1errlog);
}
ie31200_clear_error_info(mci);
@@ -274,14 +288,14 @@ static void ie31200_process_error_info(struct mem_ctl_info *mci,
if (log & IE31200_ECCERRLOG_UE) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
0, 0, 0,
- eccerrlog_row(channel, log),
+ eccerrlog_row(log),
channel, -1,
"ie31200 UE", "");
} else if (log & IE31200_ECCERRLOG_CE) {
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
0, 0,
IE31200_ECCERRLOG_SYNDROME(log),
- eccerrlog_row(channel, log),
+ eccerrlog_row(log),
channel, -1,
"ie31200 CE", "");
}
@@ -326,6 +340,33 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
return window;
}
+static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
+ int chan)
+{
+ dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE;
+ dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0;
+ dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >>
+ (IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT + (chan << 4)));
+}
+
+static void __populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
+ int chan)
+{
+ dd->size = (addr_decode >> (chan << 3)) & IE31200_MAD_DIMM_SIZE;
+ dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK << chan)) ? 1 : 0;
+ dd->x16_width = (addr_decode & (IE31200_MAD_DIMM_A_WIDTH << chan)) ? 1 : 0;
+}
+
+static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int chan,
+ bool skl)
+{
+ if (skl)
+ __skl_populate_dimm_info(dd, addr_decode, chan);
+ else
+ __populate_dimm_info(dd, addr_decode, chan);
+}
+
+
static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
{
int i, j, ret;
@@ -334,7 +375,8 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
void __iomem *window;
struct ie31200_priv *priv;
- u32 addr_decode;
+ u32 addr_decode, mad_offset;
+ bool skl = (pdev->device == PCI_DEVICE_ID_INTEL_IE31200_HB_8);
edac_dbg(0, "MC:\n");
@@ -363,7 +405,10 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
edac_dbg(3, "MC: init mci\n");
mci->pdev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_DDR3;
+ if (skl)
+ mci->mtype_cap = MEM_FLAG_DDR4;
+ else
+ mci->mtype_cap = MEM_FLAG_DDR3;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
@@ -374,19 +419,24 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
mci->ctl_page_to_phys = NULL;
priv = mci->pvt_info;
priv->window = window;
+ if (skl) {
+ priv->c0errlog = window + IE31200_C0ECCERRLOG_SKL;
+ priv->c1errlog = window + IE31200_C1ECCERRLOG_SKL;
+ mad_offset = IE31200_MAD_DIMM_0_OFFSET_SKL;
+ } else {
+ priv->c0errlog = window + IE31200_C0ECCERRLOG;
+ priv->c1errlog = window + IE31200_C1ECCERRLOG;
+ mad_offset = IE31200_MAD_DIMM_0_OFFSET;
+ }
/* populate DIMM info */
for (i = 0; i < IE31200_CHANNELS; i++) {
- addr_decode = readl(window + IE31200_MAD_DIMM_0_OFFSET +
+ addr_decode = readl(window + mad_offset +
(i * 4));
edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
- dimm_info[i][j].size = (addr_decode >> (j * 8)) &
- IE31200_MAD_DIMM_SIZE;
- dimm_info[i][j].dual_rank = (addr_decode &
- (IE31200_MAD_DIMM_A_RANK << j)) ? 1 : 0;
- dimm_info[i][j].x16_width = (addr_decode &
- (IE31200_MAD_DIMM_A_WIDTH << j)) ? 1 : 0;
+ populate_dimm_info(&dimm_info[i][j], addr_decode, j,
+ skl);
edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
dimm_info[i][j].size,
dimm_info[i][j].dual_rank,
@@ -405,7 +455,7 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
struct dimm_info *dimm;
unsigned long nr_pages;
- nr_pages = IE31200_PAGES(dimm_info[j][i].size);
+ nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl);
if (nr_pages == 0)
continue;
@@ -417,7 +467,10 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
dimm->nr_pages = nr_pages;
edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
dimm->grain = 8; /* just a guess */
- dimm->mtype = MEM_DDR3;
+ if (skl)
+ dimm->mtype = MEM_DDR4;
+ else
+ dimm->mtype = MEM_DDR3;
dimm->dtype = DEV_UNKNOWN;
dimm->edac_mode = EDAC_UNKNOWN;
}
@@ -426,7 +479,10 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
dimm->nr_pages = nr_pages;
edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
dimm->grain = 8; /* same guess */
- dimm->mtype = MEM_DDR3;
+ if (skl)
+ dimm->mtype = MEM_DDR4;
+ else
+ dimm->mtype = MEM_DDR3;
dimm->dtype = DEV_UNKNOWN;
dimm->edac_mode = EDAC_UNKNOWN;
}
@@ -501,6 +557,9 @@ static const struct pci_device_id ie31200_pci_tbl[] = {
PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
IE31200},
{
+ PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ IE31200},
+ {
0,
} /* 0 terminated list. */
};
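For reference, the scaling done by the new IE31200_PAGES() macro above works out as follows; a minimal standalone sketch (not part of the patch, assuming the common PAGE_SHIFT of 12, i.e. 4 KiB pages) with a made-up size field:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */
#define IE31200_PAGES(n, skl) \
	((unsigned long)(n) << (28 + (2 * (skl)) - PAGE_SHIFT))

int main(void)
{
	unsigned int size = 8;	/* hypothetical MAD_DIMM size field */

	/* pre-Skylake: 8 x 256 MiB = 2 GiB ->  524288 pages of 4 KiB */
	printf("ddr3: %lu pages\n", IE31200_PAGES(size, 0));
	/* Skylake:    8 x 1 GiB  = 8 GiB -> 2097152 pages of 4 KiB */
	printf("skl:  %lu pages\n", IE31200_PAGES(size, 1));
	return 0;
}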
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 49768c08ac07..9b6800a79c7f 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -1052,7 +1052,6 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
struct mce *m = (struct mce *)data;
struct cpuinfo_x86 *c = &cpu_data(m->extcpu);
int ecc;
- u32 ebx = cpuid_ebx(0x80000007);
if (amd_filter_mce(m))
return NOTIFY_STOP;
@@ -1075,7 +1074,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
((m->status & MCI_STATUS_POISON) ? "Poison" : "-"));
- if (!!(ebx & BIT(3))) {
+ if (boot_cpu_has(X86_FEATURE_SMCA)) {
u32 low, high;
u32 addr = MSR_AMD64_SMCA_MCx_CONFIG(m->bank);
@@ -1094,7 +1093,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
if (m->status & MCI_STATUS_ADDRV)
pr_emerg(HW_ERR "MC%d Error Address: 0x%016llx\n", m->bank, m->addr);
- if (!!(ebx & BIT(3))) {
+ if (boot_cpu_has(X86_FEATURE_SMCA)) {
decode_smca_errors(m);
goto err_code;
}
@@ -1149,7 +1148,6 @@ static struct notifier_block amd_mce_dec_nb = {
static int __init mce_amd_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
- u32 ebx;
if (c->x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
@@ -1205,9 +1203,8 @@ static int __init mce_amd_init(void)
break;
case 0x17:
- ebx = cpuid_ebx(0x80000007);
xec_mask = 0x3f;
- if (!(ebx & BIT(3))) {
+ if (!boot_cpu_has(X86_FEATURE_SMCA)) {
printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n");
goto err_out;
}
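For context, X86_FEATURE_SMCA is populated from the same CPUID bit that the removed code read by hand; a short kernel-context sketch of the two equivalent checks (helper names are invented, not from the driver):

#include <linux/types.h>
#include <linux/bitops.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>

/* What the removed code tested: CPUID Fn8000_0007 EBX, bit 3 (Scalable MCA) */
static bool smca_supported_raw(void)
{
	return !!(cpuid_ebx(0x80000007) & BIT(3));
}

/* What the patch switches to: the cached feature flag derived from that bit */
static bool smca_supported(void)
{
	return boot_cpu_has(X86_FEATURE_SMCA);
}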
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 8bf745d2da7e..b4d0bf6534cf 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -21,6 +21,8 @@
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <asm/mce.h>
@@ -28,8 +30,6 @@
/* Static vars */
static LIST_HEAD(sbridge_edac_list);
-static DEFINE_MUTEX(sbridge_edac_lock);
-static int probed;
/*
* Alter this version for the module when modifications are made
@@ -364,16 +364,6 @@ struct sbridge_pvt {
bool is_mirrored, is_lockstep, is_close_pg;
bool is_chan_hash;
- /* Fifo double buffers */
- struct mce mce_entry[MCE_LOG_LEN];
- struct mce mce_outentry[MCE_LOG_LEN];
-
- /* Fifo in/out counters */
- unsigned mce_in, mce_out;
-
- /* Count indicator to show errors not got */
- unsigned mce_overrun;
-
/* Memory description */
u64 tolm, tohm;
struct knl_pvt knl;
@@ -662,18 +652,6 @@ static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
{0,} /* 0 terminated list. */
};
-/*
- * pci_device_id table for which devices we are looking for
- */
-static const struct pci_device_id sbridge_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0)},
- {0,} /* 0 terminated list. */
-};
-
/****************************************************************************
Ancillary status routines
@@ -3097,63 +3075,8 @@ err_parsing:
}
/*
- * sbridge_check_error Retrieve and process errors reported by the
- * hardware. Called by the Core module.
- */
-static void sbridge_check_error(struct mem_ctl_info *mci)
-{
- struct sbridge_pvt *pvt = mci->pvt_info;
- int i;
- unsigned count = 0;
- struct mce *m;
-
- /*
- * MCE first step: Copy all mce errors into a temporary buffer
- * We use a double buffering here, to reduce the risk of
- * loosing an error.
- */
- smp_rmb();
- count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
- % MCE_LOG_LEN;
- if (!count)
- return;
-
- m = pvt->mce_outentry;
- if (pvt->mce_in + count > MCE_LOG_LEN) {
- unsigned l = MCE_LOG_LEN - pvt->mce_in;
-
- memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
- smp_wmb();
- pvt->mce_in = 0;
- count -= l;
- m += l;
- }
- memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
- smp_wmb();
- pvt->mce_in += count;
-
- smp_rmb();
- if (pvt->mce_overrun) {
- sbridge_printk(KERN_ERR, "Lost %d memory errors\n",
- pvt->mce_overrun);
- smp_wmb();
- pvt->mce_overrun = 0;
- }
-
- /*
- * MCE second step: parse errors and display
- */
- for (i = 0; i < count; i++)
- sbridge_mce_output_error(mci, &pvt->mce_outentry[i]);
-}
-
-/*
- * sbridge_mce_check_error Replicates mcelog routine to get errors
- * This routine simply queues mcelog errors, and
- * return. The error itself should be handled later
- * by sbridge_check_error.
- * WARNING: As this routine should be called at NMI time, extra care should
- * be taken to avoid deadlocks, and to be as fast as possible.
+ * Check that logging is enabled and that this is the right type
+ * of error for us to handle.
*/
static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
void *data)
@@ -3198,21 +3121,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
"%u APIC %x\n", mce->cpuvendor, mce->cpuid,
mce->time, mce->socketid, mce->apicid);
- smp_rmb();
- if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
- smp_wmb();
- pvt->mce_overrun++;
- return NOTIFY_DONE;
- }
-
- /* Copy memory error at the ringbuffer */
- memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
- smp_wmb();
- pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
-
- /* Handle fatal errors immediately */
- if (mce->mcgstatus & 1)
- sbridge_check_error(mci);
+ sbridge_mce_output_error(mci, mce);
	/* Advise mcelog that the errors were handled */
return NOTIFY_STOP;
@@ -3298,9 +3207,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
- /* Set the function pointer to an actual operation function */
- mci->edac_check = sbridge_check_error;
-
pvt->info.type = type;
switch (type) {
case IVY_BRIDGE:
@@ -3448,62 +3354,40 @@ fail0:
return rc;
}
+#define ICPU(model, table) \
+ { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
+
+/* Order here must match "enum type" */
+static const struct x86_cpu_id sbridge_cpuids[] = {
+ ICPU(0x2d, pci_dev_descr_sbridge_table), /* SANDY_BRIDGE */
+ ICPU(0x3e, pci_dev_descr_ibridge_table), /* IVY_BRIDGE */
+ ICPU(0x3f, pci_dev_descr_haswell_table), /* HASWELL */
+ ICPU(0x4f, pci_dev_descr_broadwell_table), /* BROADWELL */
+ ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */
+ { }
+};
+MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
+
/*
- * sbridge_probe Probe for ONE instance of device to see if it is
+ * sbridge_probe Get all devices and register memory controllers
* present.
* return:
* 0 for FOUND a device
* < 0 for error code
*/
-static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int sbridge_probe(const struct x86_cpu_id *id)
{
int rc = -ENODEV;
u8 mc, num_mc = 0;
struct sbridge_dev *sbridge_dev;
- enum type type = SANDY_BRIDGE;
+ struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
/* get the pci devices we want to reserve for our use */
- mutex_lock(&sbridge_edac_lock);
-
- /*
- * All memory controllers are allocated at the first pass.
- */
- if (unlikely(probed >= 1)) {
- mutex_unlock(&sbridge_edac_lock);
- return -ENODEV;
- }
- probed++;
+ rc = sbridge_get_all_devices(&num_mc, ptable);
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
- rc = sbridge_get_all_devices(&num_mc,
- pci_dev_descr_ibridge_table);
- type = IVY_BRIDGE;
- break;
- case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
- rc = sbridge_get_all_devices(&num_mc,
- pci_dev_descr_sbridge_table);
- type = SANDY_BRIDGE;
- break;
- case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
- rc = sbridge_get_all_devices(&num_mc,
- pci_dev_descr_haswell_table);
- type = HASWELL;
- break;
- case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
- rc = sbridge_get_all_devices(&num_mc,
- pci_dev_descr_broadwell_table);
- type = BROADWELL;
- break;
- case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
- rc = sbridge_get_all_devices_knl(&num_mc,
- pci_dev_descr_knl_table);
- type = KNIGHTS_LANDING;
- break;
- }
if (unlikely(rc < 0)) {
- edac_dbg(0, "couldn't get all devices for 0x%x\n", pdev->device);
+ edac_dbg(0, "couldn't get all devices\n");
goto fail0;
}
@@ -3514,14 +3398,13 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mc, mc + 1, num_mc);
sbridge_dev->mc = mc++;
- rc = sbridge_register_mci(sbridge_dev, type);
+ rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids);
if (unlikely(rc < 0))
goto fail1;
}
sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
- mutex_unlock(&sbridge_edac_lock);
return 0;
fail1:
@@ -3530,74 +3413,47 @@ fail1:
sbridge_put_all_devices();
fail0:
- mutex_unlock(&sbridge_edac_lock);
return rc;
}
/*
- * sbridge_remove destructor for one instance of device
+ * sbridge_remove cleanup
*
*/
-static void sbridge_remove(struct pci_dev *pdev)
+static void sbridge_remove(void)
{
struct sbridge_dev *sbridge_dev;
edac_dbg(0, "\n");
- /*
- * we have a trouble here: pdev value for removal will be wrong, since
- * it will point to the X58 register used to detect that the machine
- * is a Nehalem or upper design. However, due to the way several PCI
- * devices are grouped together to provide MC functionality, we need
- * to use a different method for releasing the devices
- */
-
- mutex_lock(&sbridge_edac_lock);
-
- if (unlikely(!probed)) {
- mutex_unlock(&sbridge_edac_lock);
- return;
- }
-
list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
sbridge_unregister_mci(sbridge_dev);
/* Release PCI resources */
sbridge_put_all_devices();
-
- probed--;
-
- mutex_unlock(&sbridge_edac_lock);
}
-MODULE_DEVICE_TABLE(pci, sbridge_pci_tbl);
-
-/*
- * sbridge_driver pci_driver structure for this module
- *
- */
-static struct pci_driver sbridge_driver = {
- .name = "sbridge_edac",
- .probe = sbridge_probe,
- .remove = sbridge_remove,
- .id_table = sbridge_pci_tbl,
-};
-
/*
* sbridge_init Module entry function
* Try to initialize this module for its devices
*/
static int __init sbridge_init(void)
{
- int pci_rc;
+ const struct x86_cpu_id *id;
+ int rc;
edac_dbg(2, "\n");
+ id = x86_match_cpu(sbridge_cpuids);
+ if (!id)
+ return -ENODEV;
+
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
- pci_rc = pci_register_driver(&sbridge_driver);
- if (pci_rc >= 0) {
+ rc = sbridge_probe(id);
+
+ if (rc >= 0) {
mce_register_decode_chain(&sbridge_mce_dec);
if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
@@ -3605,9 +3461,9 @@ static int __init sbridge_init(void)
}
sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
- pci_rc);
+ rc);
- return pci_rc;
+ return rc;
}
/*
@@ -3617,7 +3473,7 @@ static int __init sbridge_init(void)
static void __exit sbridge_exit(void)
{
edac_dbg(2, "\n");
- pci_unregister_driver(&sbridge_driver);
+ sbridge_remove();
mce_unregister_decode_chain(&sbridge_mce_dec);
}
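The conversion above keys device discovery off the CPU model instead of a PCI id table; a hypothetical module skeleton (all my_* names are invented) sketching the x86_match_cpu() pattern being adopted:

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>

/* vendor, family, model, feature, driver_data -- one entry per model */
static const struct x86_cpu_id my_cpuids[] = {
	{ X86_VENDOR_INTEL, 6, 0x2d, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, my_cpuids);

static int __init my_init(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(my_cpuids);

	if (!id)
		return -ENODEV;

	/* id->driver_data would select the per-model PCI device table */
	return 0;
}
module_init(my_init);
MODULE_LICENSE("GPL");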
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index e1670d533f97..6394152f648f 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -87,6 +87,31 @@ config EFI_RUNTIME_WRAPPERS
config EFI_ARMSTUB
bool
+config EFI_BOOTLOADER_CONTROL
+ tristate "EFI Bootloader Control"
+ depends on EFI_VARS
+ default n
+ ---help---
+ This module installs a reboot hook, such that if reboot() is
+ invoked with a string argument NNN, "NNN" is copied to the
+ "LoaderEntryOneShot" EFI variable, to be read by the
+ bootloader. If the string matches one of the boot labels
+ defined in its configuration, the bootloader will boot once
+ to that label. The "LoaderEntryRebootReason" EFI variable is
+ set with the reboot reason: "reboot" or "shutdown". The
+ bootloader reads this reboot reason and takes particular
+ action according to its policy.
+
+config EFI_CAPSULE_LOADER
+ tristate "EFI capsule loader"
+ depends on EFI
+ help
+ This option exposes a loader interface "/dev/efi_capsule_loader" for
+ users to load EFI capsules. This driver requires working runtime
+ capsule support in the firmware, which many OEMs do not provide.
+
+ Most users should say N.
+
endmenu
config UEFI_CPER
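As a usage illustration for the EFI_BOOTLOADER_CONTROL help text above, a small userspace sketch (the boot label "recovery" is only an example) that hands a one-shot target to the kernel via the RESTART2 reboot command:

#include <sys/syscall.h>
#include <linux/reboot.h>
#include <unistd.h>

int main(void)
{
	/* Needs CAP_SYS_BOOT; the machine reboots if the call succeeds. */
	return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		       LINUX_REBOOT_CMD_RESTART2, "recovery");
}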
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 62e654f255f4..a219640f881f 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -9,7 +9,8 @@
#
KASAN_SANITIZE_runtime-wrappers.o := n
-obj-$(CONFIG_EFI) += efi.o vars.o reboot.o
+obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o
+obj-$(CONFIG_EFI) += capsule.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_ESRT) += esrt.o
obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
@@ -18,7 +19,9 @@ obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o
obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o
obj-$(CONFIG_EFI_STUB) += libstub/
obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o
+obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o
arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
obj-$(CONFIG_ARM64) += $(arm-obj-y)
+obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 8714f8c271ba..a850cbc48d8d 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -11,17 +11,19 @@
*
*/
+#define pr_fmt(fmt) "efi: " fmt
+
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
#include <asm/efi.h>
-struct efi_memory_map memmap;
-
u64 efi_system_table;
static int __init is_normal_ram(efi_memory_desc_t *md)
@@ -40,7 +42,7 @@ static phys_addr_t efi_to_phys(unsigned long addr)
{
efi_memory_desc_t *md;
- for_each_efi_memory_desc(&memmap, md) {
+ for_each_efi_memory_desc(md) {
if (!(md->attribute & EFI_MEMORY_RUNTIME))
continue;
if (md->virt_addr == 0)
@@ -53,6 +55,36 @@ static phys_addr_t efi_to_phys(unsigned long addr)
return addr;
}
+static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR;
+
+static __initdata efi_config_table_type_t arch_tables[] = {
+ {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, NULL, &screen_info_table},
+ {NULL_GUID, NULL, NULL}
+};
+
+static void __init init_screen_info(void)
+{
+ struct screen_info *si;
+
+ if (screen_info_table != EFI_INVALID_TABLE_ADDR) {
+ si = early_memremap_ro(screen_info_table, sizeof(*si));
+ if (!si) {
+ pr_err("Could not map screen_info config table\n");
+ return;
+ }
+ screen_info = *si;
+ early_memunmap(si, sizeof(*si));
+
+ /* dummycon on ARM needs non-zero values for columns/lines */
+ screen_info.orig_video_cols = 80;
+ screen_info.orig_video_lines = 25;
+ }
+
+ if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+ memblock_is_map_memory(screen_info.lfb_base))
+ memblock_mark_nomap(screen_info.lfb_base, screen_info.lfb_size);
+}
+
static int __init uefi_init(void)
{
efi_char16_t *c16;
@@ -85,6 +117,8 @@ static int __init uefi_init(void)
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff);
+ efi.runtime_version = efi.systab->hdr.revision;
+
/* Show what we know for posterity */
c16 = early_memremap_ro(efi_to_phys(efi.systab->fw_vendor),
sizeof(vendor) * sizeof(efi_char16_t));
@@ -108,7 +142,8 @@ static int __init uefi_init(void)
goto out;
}
retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
- sizeof(efi_config_table_t), NULL);
+ sizeof(efi_config_table_t),
+ arch_tables);
early_memunmap(config_tables, table_size);
out:
@@ -143,7 +178,15 @@ static __init void reserve_regions(void)
if (efi_enabled(EFI_DBG))
pr_info("Processing EFI memory map:\n");
- for_each_efi_memory_desc(&memmap, md) {
+ /*
+ * Discard memblocks discovered so far: if there are any at this
+ * point, they originate from memory nodes in the DT, and UEFI
+ * uses its own memory map instead.
+ */
+ memblock_dump_all();
+ memblock_remove(0, (phys_addr_t)ULLONG_MAX);
+
+ for_each_efi_memory_desc(md) {
paddr = md->phys_addr;
npages = md->num_pages;
@@ -184,9 +227,9 @@ void __init efi_init(void)
efi_system_table = params.system_table;
- memmap.phys_map = params.mmap;
- memmap.map = early_memremap_ro(params.mmap, params.mmap_size);
- if (memmap.map == NULL) {
+ efi.memmap.phys_map = params.mmap;
+ efi.memmap.map = early_memremap_ro(params.mmap, params.mmap_size);
+ if (efi.memmap.map == NULL) {
/*
* If we are booting via UEFI, the UEFI memory map is the only
* description of memory we have, so there is little point in
@@ -194,28 +237,37 @@ void __init efi_init(void)
*/
panic("Unable to map EFI memory map.\n");
}
- memmap.map_end = memmap.map + params.mmap_size;
- memmap.desc_size = params.desc_size;
- memmap.desc_version = params.desc_ver;
+ efi.memmap.map_end = efi.memmap.map + params.mmap_size;
+ efi.memmap.desc_size = params.desc_size;
+ efi.memmap.desc_version = params.desc_ver;
+
+ WARN(efi.memmap.desc_version != 1,
+ "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
+ efi.memmap.desc_version);
if (uefi_init() < 0)
return;
reserve_regions();
- early_memunmap(memmap.map, params.mmap_size);
+ efi_memattr_init();
+ early_memunmap(efi.memmap.map, params.mmap_size);
- if (IS_ENABLED(CONFIG_ARM)) {
- /*
- * ARM currently does not allow ioremap_cache() to be called on
- * memory regions that are covered by struct page. So remove the
- * UEFI memory map from the linear mapping.
- */
- memblock_mark_nomap(params.mmap & PAGE_MASK,
- PAGE_ALIGN(params.mmap_size +
- (params.mmap & ~PAGE_MASK)));
- } else {
- memblock_reserve(params.mmap & PAGE_MASK,
- PAGE_ALIGN(params.mmap_size +
- (params.mmap & ~PAGE_MASK)));
- }
+ memblock_reserve(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+ (params.mmap & ~PAGE_MASK)));
+
+ init_screen_info();
+}
+
+static int __init register_gop_device(void)
+{
+ void *pd;
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ return 0;
+
+ pd = platform_device_register_data(NULL, "efi-framebuffer", 0,
+ &screen_info, sizeof(screen_info));
+ return PTR_ERR_OR_ZERO(pd);
}
+subsys_initcall(register_gop_device);
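The memblock_reserve() call above must account for the UEFI memory map not being page aligned; a standalone arithmetic sketch with made-up values showing why the in-page offset of params.mmap is added before rounding up:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long mmap = 0x82345678UL;	/* hypothetical params.mmap */
	unsigned long mmap_size = 0x3000UL;	/* hypothetical params.mmap_size */

	unsigned long base = mmap & PAGE_MASK;
	unsigned long size = PAGE_ALIGN(mmap_size + (mmap & ~PAGE_MASK));

	/* Prints base=0x82345000 size=0x4000: aligning the size alone would
	 * give 0x3000 and leave the tail of the map outside the reservation. */
	printf("base=%#lx size=%#lx\n", base, size);
	return 0;
}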
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 6ae21e41a429..17ccf0a8787a 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -42,11 +42,13 @@ static struct mm_struct efi_mm = {
static bool __init efi_virtmap_init(void)
{
efi_memory_desc_t *md;
+ bool systab_found;
efi_mm.pgd = pgd_alloc(&efi_mm);
init_new_context(NULL, &efi_mm);
- for_each_efi_memory_desc(&memmap, md) {
+ systab_found = false;
+ for_each_efi_memory_desc(md) {
phys_addr_t phys = md->phys_addr;
int ret;
@@ -64,7 +66,25 @@ static bool __init efi_virtmap_init(void)
&phys, ret);
return false;
}
+ /*
+ * If this entry covers the address of the UEFI system table,
+ * calculate and record its virtual address.
+ */
+ if (efi_system_table >= phys &&
+ efi_system_table < phys + (md->num_pages * EFI_PAGE_SIZE)) {
+ efi.systab = (void *)(unsigned long)(efi_system_table -
+ phys + md->virt_addr);
+ systab_found = true;
+ }
+ }
+ if (!systab_found) {
+ pr_err("No virtual mapping found for the UEFI System Table\n");
+ return false;
}
+
+ if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
+ return false;
+
return true;
}
@@ -89,26 +109,17 @@ static int __init arm_enable_runtime_services(void)
pr_info("Remapping and enabling EFI services.\n");
- mapsize = memmap.map_end - memmap.map;
- memmap.map = (__force void *)ioremap_cache(memmap.phys_map,
- mapsize);
- if (!memmap.map) {
- pr_err("Failed to remap EFI memory map\n");
- return -ENOMEM;
- }
- memmap.map_end = memmap.map + mapsize;
- efi.memmap = &memmap;
+ mapsize = efi.memmap.map_end - efi.memmap.map;
- efi.systab = (__force void *)ioremap_cache(efi_system_table,
- sizeof(efi_system_table_t));
- if (!efi.systab) {
- pr_err("Failed to remap EFI System Table\n");
+ efi.memmap.map = memremap(efi.memmap.phys_map, mapsize, MEMREMAP_WB);
+ if (!efi.memmap.map) {
+ pr_err("Failed to remap EFI memory map\n");
return -ENOMEM;
}
- set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+ efi.memmap.map_end = efi.memmap.map + mapsize;
if (!efi_virtmap_init()) {
- pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
+ pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
return -ENOMEM;
}
@@ -116,8 +127,6 @@ static int __init arm_enable_runtime_services(void)
efi_native_runtime_setup();
set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
- efi.runtime_version = efi.systab->hdr.revision;
-
return 0;
}
early_initcall(arm_enable_runtime_services);
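The system table fix-up in efi_virtmap_init() above is plain address rebasing within a single descriptor; a standalone sketch with made-up addresses:

#include <stdio.h>
#include <stdint.h>

#define EFI_PAGE_SIZE	4096ULL

int main(void)
{
	uint64_t efi_system_table = 0x80001230ULL;	/* physical, from the stub */
	uint64_t phys = 0x80000000ULL;			/* md->phys_addr */
	uint64_t virt = 0xffff000008000000ULL;		/* md->virt_addr */
	uint64_t num_pages = 16;

	if (efi_system_table >= phys &&
	    efi_system_table < phys + num_pages * EFI_PAGE_SIZE)
		/* prints 0xffff000008001230 */
		printf("systab VA: %#llx\n",
		       (unsigned long long)(efi_system_table - phys + virt));
	return 0;
}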
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
new file mode 100644
index 000000000000..c99c24bc79b0
--- /dev/null
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -0,0 +1,343 @@
+/*
+ * EFI capsule loader driver.
+ *
+ * Copyright 2015 Intel Corporation
+ *
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/efi.h>
+
+#define NO_FURTHER_WRITE_ACTION -1
+
+struct capsule_info {
+ bool header_obtained;
+ int reset_type;
+ long index;
+ size_t count;
+ size_t total_size;
+ struct page **pages;
+ size_t page_bytes_remain;
+};
+
+/**
+ * efi_free_all_buff_pages - free all previously allocated buffer pages
+ * @cap_info: pointer to current instance of capsule_info structure
+ *
+ * In addition to freeing buffer pages, it flags NO_FURTHER_WRITE_ACTION
+ * to cease processing data in subsequent write(2) calls until close(2)
+ * is called.
+ **/
+static void efi_free_all_buff_pages(struct capsule_info *cap_info)
+{
+ while (cap_info->index > 0)
+ __free_page(cap_info->pages[--cap_info->index]);
+
+ cap_info->index = NO_FURTHER_WRITE_ACTION;
+}
+
+/**
+ * efi_capsule_setup_info - obtain the efi capsule header in the binary and
+ * set up the capsule_info structure
+ * @cap_info: pointer to current instance of capsule_info structure
+ * @kbuff: a mapped first page buffer pointer
+ * @hdr_bytes: the total received number of bytes for efi header
+ **/
+static ssize_t efi_capsule_setup_info(struct capsule_info *cap_info,
+ void *kbuff, size_t hdr_bytes)
+{
+ efi_capsule_header_t *cap_hdr;
+ size_t pages_needed;
+ int ret;
+ void *temp_page;
+
+ /* Only process data block that is larger than efi header size */
+ if (hdr_bytes < sizeof(efi_capsule_header_t))
+ return 0;
+
+ /* Reset back to the correct offset of header */
+ cap_hdr = kbuff - cap_info->count;
+ pages_needed = ALIGN(cap_hdr->imagesize, PAGE_SIZE) >> PAGE_SHIFT;
+
+ if (pages_needed == 0) {
+ pr_err("%s: pages count invalid\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check if the capsule binary is supported */
+ ret = efi_capsule_supported(cap_hdr->guid, cap_hdr->flags,
+ cap_hdr->imagesize,
+ &cap_info->reset_type);
+ if (ret) {
+ pr_err("%s: efi_capsule_supported() failed\n",
+ __func__);
+ return ret;
+ }
+
+ cap_info->total_size = cap_hdr->imagesize;
+ temp_page = krealloc(cap_info->pages,
+ pages_needed * sizeof(void *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!temp_page) {
+ pr_debug("%s: krealloc() failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ cap_info->pages = temp_page;
+ cap_info->header_obtained = true;
+
+ return 0;
+}
+
+/**
+ * efi_capsule_submit_update - invoke the efi_capsule_update API once the
+ * binary upload is done
+ * @cap_info: pointer to current instance of capsule_info structure
+ **/
+static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
+{
+ int ret;
+ void *cap_hdr_temp;
+
+ cap_hdr_temp = kmap(cap_info->pages[0]);
+ if (!cap_hdr_temp) {
+ pr_debug("%s: kmap() failed\n", __func__);
+ return -EFAULT;
+ }
+
+ ret = efi_capsule_update(cap_hdr_temp, cap_info->pages);
+ kunmap(cap_info->pages[0]);
+ if (ret) {
+ pr_err("%s: efi_capsule_update() failed\n", __func__);
+ return ret;
+ }
+
+ /* Indicate capsule binary uploading is done */
+ cap_info->index = NO_FURTHER_WRITE_ACTION;
+ pr_info("%s: Successfully upload capsule file with reboot type '%s'\n",
+ __func__, !cap_info->reset_type ? "RESET_COLD" :
+ cap_info->reset_type == 1 ? "RESET_WARM" :
+ "RESET_SHUTDOWN");
+ return 0;
+}
+
+/**
+ * efi_capsule_write - store the capsule binary and pass it to
+ * efi_capsule_update() API
+ * @file: file pointer
+ * @buff: buffer pointer
+ * @count: number of bytes in @buff
+ * @offp: not used
+ *
+ * Expectation:
+ * - A user space tool should start at the beginning of capsule binary and
+ * pass data in sequentially.
+ * - Users should close and re-open this file node in order to upload more
+ *   capsules.
+ * - After an error is returned, the user should close the file and restart
+ *   the operation for the next try, otherwise -EIO will be returned until
+ *   the file is closed.
+ * - An EFI capsule header must be located at the beginning of capsule
+ * binary file and passed in as first block data of write operation.
+ **/
+static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
+ size_t count, loff_t *offp)
+{
+ int ret = 0;
+ struct capsule_info *cap_info = file->private_data;
+ struct page *page;
+ void *kbuff = NULL;
+ size_t write_byte;
+
+ if (count == 0)
+ return 0;
+
+ /* Return error while NO_FURTHER_WRITE_ACTION is flagged */
+ if (cap_info->index < 0)
+ return -EIO;
+
+ /* Only alloc a new page when previous page is full */
+ if (!cap_info->page_bytes_remain) {
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ pr_debug("%s: alloc_page() failed\n", __func__);
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ cap_info->pages[cap_info->index++] = page;
+ cap_info->page_bytes_remain = PAGE_SIZE;
+ }
+
+ page = cap_info->pages[cap_info->index - 1];
+
+ kbuff = kmap(page);
+ if (!kbuff) {
+ pr_debug("%s: kmap() failed\n", __func__);
+ ret = -EFAULT;
+ goto failed;
+ }
+ kbuff += PAGE_SIZE - cap_info->page_bytes_remain;
+
+ /* Copy capsule binary data from user space to kernel space buffer */
+ write_byte = min_t(size_t, count, cap_info->page_bytes_remain);
+ if (copy_from_user(kbuff, buff, write_byte)) {
+ pr_debug("%s: copy_from_user() failed\n", __func__);
+ ret = -EFAULT;
+ goto fail_unmap;
+ }
+ cap_info->page_bytes_remain -= write_byte;
+
+ /* Setup capsule binary info structure */
+ if (!cap_info->header_obtained) {
+ ret = efi_capsule_setup_info(cap_info, kbuff,
+ cap_info->count + write_byte);
+ if (ret)
+ goto fail_unmap;
+ }
+
+ cap_info->count += write_byte;
+ kunmap(page);
+
+ /* Submit the full binary to efi_capsule_update() API */
+ if (cap_info->header_obtained &&
+ cap_info->count >= cap_info->total_size) {
+ if (cap_info->count > cap_info->total_size) {
+ pr_err("%s: upload size exceeded header defined size\n",
+ __func__);
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ ret = efi_capsule_submit_update(cap_info);
+ if (ret)
+ goto failed;
+ }
+
+ return write_byte;
+
+fail_unmap:
+ kunmap(page);
+failed:
+ efi_free_all_buff_pages(cap_info);
+ return ret;
+}
+
+/**
+ * efi_capsule_flush - called by file close or file flush
+ * @file: file pointer
+ * @id: not used
+ *
+ * If a capsule is being partially uploaded, calling this function is
+ * treated as upload termination: the buffer pages allocated so far are
+ * freed and -ECANCELED is returned.
+ **/
+static int efi_capsule_flush(struct file *file, fl_owner_t id)
+{
+ int ret = 0;
+ struct capsule_info *cap_info = file->private_data;
+
+ if (cap_info->index > 0) {
+ pr_err("%s: capsule upload not complete\n", __func__);
+ efi_free_all_buff_pages(cap_info);
+ ret = -ECANCELED;
+ }
+
+ return ret;
+}
+
+/**
+ * efi_capsule_release - called by file close
+ * @inode: not used
+ * @file: file pointer
+ *
+ * We will not free successfully submitted pages since efi update
+ * requires data to be maintained across system reboot.
+ **/
+static int efi_capsule_release(struct inode *inode, struct file *file)
+{
+ struct capsule_info *cap_info = file->private_data;
+
+ kfree(cap_info->pages);
+ kfree(file->private_data);
+ file->private_data = NULL;
+ return 0;
+}
+
+/**
+ * efi_capsule_open - called by file open
+ * @inode: not used
+ * @file: file pointer
+ *
+ * Allocates a fresh capsule_info structure for each open() call, so that
+ * multiple users can open the file and upload their capsule binaries
+ * concurrently without waiting for each other to finish.
+ **/
+static int efi_capsule_open(struct inode *inode, struct file *file)
+{
+ struct capsule_info *cap_info;
+
+ cap_info = kzalloc(sizeof(*cap_info), GFP_KERNEL);
+ if (!cap_info)
+ return -ENOMEM;
+
+ cap_info->pages = kzalloc(sizeof(void *), GFP_KERNEL);
+ if (!cap_info->pages) {
+ kfree(cap_info);
+ return -ENOMEM;
+ }
+
+ file->private_data = cap_info;
+
+ return 0;
+}
+
+static const struct file_operations efi_capsule_fops = {
+ .owner = THIS_MODULE,
+ .open = efi_capsule_open,
+ .write = efi_capsule_write,
+ .flush = efi_capsule_flush,
+ .release = efi_capsule_release,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice efi_capsule_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "efi_capsule_loader",
+ .fops = &efi_capsule_fops,
+};
+
+static int __init efi_capsule_loader_init(void)
+{
+ int ret;
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return -ENODEV;
+
+ ret = misc_register(&efi_capsule_misc);
+ if (ret)
+ pr_err("%s: Failed to register misc char file note\n",
+ __func__);
+
+ return ret;
+}
+module_init(efi_capsule_loader_init);
+
+static void __exit efi_capsule_loader_exit(void)
+{
+ misc_deregister(&efi_capsule_misc);
+}
+module_exit(efi_capsule_loader_exit);
+
+MODULE_DESCRIPTION("EFI capsule firmware binary loader");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
new file mode 100644
index 000000000000..53b9fd2293ee
--- /dev/null
+++ b/drivers/firmware/efi/capsule.c
@@ -0,0 +1,308 @@
+/*
+ * EFI capsule support.
+ *
+ * Copyright 2013 Intel Corporation; author Matt Fleming
+ *
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/highmem.h>
+#include <linux/efi.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+
+typedef struct {
+ u64 length;
+ u64 data;
+} efi_capsule_block_desc_t;
+
+static bool capsule_pending;
+static bool stop_capsules;
+static int efi_reset_type = -1;
+
+/*
+ * capsule_mutex serialises access to both capsule_pending and
+ * efi_reset_type and stop_capsules.
+ */
+static DEFINE_MUTEX(capsule_mutex);
+
+/**
+ * efi_capsule_pending - has a capsule been passed to the firmware?
+ * @reset_type: store the type of EFI reset if capsule is pending
+ *
+ * To ensure that the registered capsule is processed correctly by the
+ * firmware we need to perform a specific type of reset. If a capsule is
+ * pending return the reset type in @reset_type.
+ *
+ * This function will race with callers of efi_capsule_update(), for
+ * example, calling this function while somebody else is in
+ * efi_capsule_update() but hasn't reached efi_capsule_update_locked()
+ * will miss the updates to capsule_pending and efi_reset_type after
+ * efi_capsule_update_locked() completes.
+ *
+ * A non-racy use is from platform reboot code because we use
+ * system_state to ensure no capsules can be sent to the firmware once
+ * we're at SYSTEM_RESTART. See efi_capsule_update_locked().
+ */
+bool efi_capsule_pending(int *reset_type)
+{
+ if (!capsule_pending)
+ return false;
+
+ if (reset_type)
+ *reset_type = efi_reset_type;
+
+ return true;
+}
+
+/*
+ * Whitelist of EFI capsule flags that we support.
+ *
+ * We do not handle EFI_CAPSULE_INITIATE_RESET because that would
+ * require us to prepare the kernel for reboot. Refuse to load any
+ * capsules with that flag and any other flags that we do not know how
+ * to handle.
+ */
+#define EFI_CAPSULE_SUPPORTED_FLAG_MASK \
+ (EFI_CAPSULE_PERSIST_ACROSS_RESET | EFI_CAPSULE_POPULATE_SYSTEM_TABLE)
+
+/**
+ * efi_capsule_supported - does the firmware support the capsule?
+ * @guid: vendor guid of capsule
+ * @flags: capsule flags
+ * @size: size of capsule data
+ * @reset: the reset type required for this capsule
+ *
+ * Check whether a capsule with @flags is supported by the firmware
+ * and that @size doesn't exceed the maximum size for a capsule.
+ *
+ * No attempt is made to check @reset against the reset type required
+ * by any pending capsules because of the races involved.
+ */
+int efi_capsule_supported(efi_guid_t guid, u32 flags, size_t size, int *reset)
+{
+ efi_capsule_header_t capsule;
+ efi_capsule_header_t *cap_list[] = { &capsule };
+ efi_status_t status;
+ u64 max_size;
+
+ if (flags & ~EFI_CAPSULE_SUPPORTED_FLAG_MASK)
+ return -EINVAL;
+
+ capsule.headersize = capsule.imagesize = sizeof(capsule);
+ memcpy(&capsule.guid, &guid, sizeof(efi_guid_t));
+ capsule.flags = flags;
+
+ status = efi.query_capsule_caps(cap_list, 1, &max_size, reset);
+ if (status != EFI_SUCCESS)
+ return efi_status_to_err(status);
+
+ if (size > max_size)
+ return -ENOSPC;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efi_capsule_supported);
+
+/*
+ * Every scatter gather list (block descriptor) page must end with a
+ * continuation pointer. The last continuation pointer of the last
+ * page must be zero to mark the end of the chain.
+ */
+#define SGLIST_PER_PAGE ((PAGE_SIZE / sizeof(efi_capsule_block_desc_t)) - 1)
+
+/*
+ * How many scatter gather list (block descriptor) pages do we need
+ * to map @count pages?
+ */
+static inline unsigned int sg_pages_num(unsigned int count)
+{
+ return DIV_ROUND_UP(count, SGLIST_PER_PAGE);
+}
+
+/**
+ * efi_capsule_update_locked - pass a single capsule to the firmware
+ * @capsule: capsule to send to the firmware
+ * @sg_pages: array of scatter gather (block descriptor) pages
+ * @reset: the reset type required for @capsule
+ *
+ * Since this function must be called under capsule_mutex check
+ * whether efi_reset_type will conflict with @reset, and atomically
+ * set it and capsule_pending if a capsule was successfully sent to
+ * the firmware.
+ *
+ * We also check to see if the system is about to restart, and if so,
+ * abort. This avoids races between efi_capsule_update() and
+ * efi_capsule_pending().
+ */
+static int
+efi_capsule_update_locked(efi_capsule_header_t *capsule,
+ struct page **sg_pages, int reset)
+{
+ efi_physical_addr_t sglist_phys;
+ efi_status_t status;
+
+ lockdep_assert_held(&capsule_mutex);
+
+ /*
+ * If someone has already registered a capsule that requires a
+ * different reset type, we're out of luck and must abort.
+ */
+ if (efi_reset_type >= 0 && efi_reset_type != reset) {
+ pr_err("Conflicting capsule reset type %d (%d).\n",
+ reset, efi_reset_type);
+ return -EINVAL;
+ }
+
+ /*
+ * If the system is getting ready to restart it may have
+ * called efi_capsule_pending() to make decisions (such as
+ * whether to force an EFI reboot), and we're racing against
+ * that call. Abort in that case.
+ */
+ if (unlikely(stop_capsules)) {
+ pr_warn("Capsule update raced with reboot, aborting.\n");
+ return -EINVAL;
+ }
+
+ sglist_phys = page_to_phys(sg_pages[0]);
+
+ status = efi.update_capsule(&capsule, 1, sglist_phys);
+ if (status == EFI_SUCCESS) {
+ capsule_pending = true;
+ efi_reset_type = reset;
+ }
+
+ return efi_status_to_err(status);
+}
+
+/**
+ * efi_capsule_update - send a capsule to the firmware
+ * @capsule: capsule to send to firmware
+ * @pages: an array of capsule data pages
+ *
+ * Build a scatter gather list with EFI capsule block descriptors to
+ * map the capsule described by @capsule with its data in @pages and
+ * send it to the firmware via the UpdateCapsule() runtime service.
+ *
+ * @capsule must be a virtual mapping of the first page in @pages
+ * (@pages[0]) in the kernel address space. That is, a
+ * capsule_header_t that describes the entire contents of the capsule
+ * must be at the start of the first data page.
+ *
+ * Even though this function will validate that the firmware supports
+ * the capsule guid, users will likely want to check that
+ * efi_capsule_supported() returns true before calling this function
+ * because it makes it easier to print helpful error messages.
+ *
+ * If the capsule is successfully submitted to the firmware, any
+ * subsequent calls to efi_capsule_pending() will return true. @pages
+ * must not be released or modified if this function returns
+ * successfully.
+ *
+ * Callers must be prepared for this function to fail, which can
+ * happen if we raced with system reboot or if there is already a
+ * pending capsule that has a reset type that conflicts with the one
+ * required by @capsule. Do NOT use efi_capsule_pending() to detect
+ * this conflict since that would be racy. Instead, submit the capsule
+ * to efi_capsule_update() and check the return value.
+ *
+ * Return 0 on success, a converted EFI status code on failure.
+ */
+int efi_capsule_update(efi_capsule_header_t *capsule, struct page **pages)
+{
+ u32 imagesize = capsule->imagesize;
+ efi_guid_t guid = capsule->guid;
+ unsigned int count, sg_count;
+ u32 flags = capsule->flags;
+ struct page **sg_pages;
+ int rv, reset_type;
+ int i, j;
+
+ rv = efi_capsule_supported(guid, flags, imagesize, &reset_type);
+ if (rv)
+ return rv;
+
+ count = DIV_ROUND_UP(imagesize, PAGE_SIZE);
+ sg_count = sg_pages_num(count);
+
+ sg_pages = kzalloc(sg_count * sizeof(*sg_pages), GFP_KERNEL);
+ if (!sg_pages)
+ return -ENOMEM;
+
+ for (i = 0; i < sg_count; i++) {
+ sg_pages[i] = alloc_page(GFP_KERNEL);
+ if (!sg_pages[i]) {
+ rv = -ENOMEM;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < sg_count; i++) {
+ efi_capsule_block_desc_t *sglist;
+
+ sglist = kmap(sg_pages[i]);
+ if (!sglist) {
+ rv = -ENOMEM;
+ goto out;
+ }
+
+ for (j = 0; j < SGLIST_PER_PAGE && count > 0; j++) {
+ u64 sz = min_t(u64, imagesize, PAGE_SIZE);
+
+ sglist[j].length = sz;
+ sglist[j].data = page_to_phys(*pages++);
+
+ imagesize -= sz;
+ count--;
+ }
+
+ /* Continuation pointer */
+ sglist[j].length = 0;
+
+ if (i + 1 == sg_count)
+ sglist[j].data = 0;
+ else
+ sglist[j].data = page_to_phys(sg_pages[i + 1]);
+
+ kunmap(sg_pages[i]);
+ }
+
+ mutex_lock(&capsule_mutex);
+ rv = efi_capsule_update_locked(capsule, sg_pages, reset_type);
+ mutex_unlock(&capsule_mutex);
+
+out:
+ for (i = 0; rv && i < sg_count; i++) {
+ if (sg_pages[i])
+ __free_page(sg_pages[i]);
+ }
+
+ kfree(sg_pages);
+ return rv;
+}
+EXPORT_SYMBOL_GPL(efi_capsule_update);
+
+static int capsule_reboot_notify(struct notifier_block *nb, unsigned long event, void *cmd)
+{
+ mutex_lock(&capsule_mutex);
+ stop_capsules = true;
+ mutex_unlock(&capsule_mutex);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block capsule_reboot_nb = {
+ .notifier_call = capsule_reboot_notify,
+};
+
+static int __init capsule_reboot_register(void)
+{
+ return register_reboot_notifier(&capsule_reboot_nb);
+}
+core_initcall(capsule_reboot_register);
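A hedged kernel-context sketch of a caller of the new API (the helper name is invented), mirroring what capsule-loader.c does; with 4 KiB pages each scatter-gather page built here holds (4096 / 16) - 1 = 255 data descriptors plus one continuation pointer, which is the arithmetic behind SGLIST_PER_PAGE and sg_pages_num():

#include <linux/efi.h>
#include <linux/mm.h>

static int submit_capsule(efi_capsule_header_t *hdr, struct page **pages)
{
	int reset_type;
	int ret;

	/* The header must sit at the start of the first data page. */
	ret = efi_capsule_supported(hdr->guid, hdr->flags, hdr->imagesize,
				    &reset_type);
	if (ret)
		return ret;	/* unsupported flags or capsule too large */

	/* On success the pages must stay allocated until after reboot. */
	return efi_capsule_update(hdr, pages);
}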
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 3a69ed5ecfcb..05509f3aaee8 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -43,6 +43,7 @@ struct efi __read_mostly efi = {
.config_table = EFI_INVALID_TABLE_ADDR,
.esrt = EFI_INVALID_TABLE_ADDR,
.properties_table = EFI_INVALID_TABLE_ADDR,
+ .mem_attr_table = EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);
@@ -256,7 +257,7 @@ subsys_initcall(efisubsys_init);
*/
int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
- struct efi_memory_map *map = efi.memmap;
+ struct efi_memory_map *map = &efi.memmap;
phys_addr_t p, e;
if (!efi_enabled(EFI_MEMMAP)) {
@@ -338,6 +339,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
{EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
{EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
+ {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
{NULL_GUID, NULL, NULL},
};
@@ -351,8 +353,9 @@ static __init int match_config_table(efi_guid_t *guid,
for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
if (!efi_guidcmp(*guid, table_types[i].guid)) {
*(table_types[i].ptr) = table;
- pr_cont(" %s=0x%lx ",
- table_types[i].name, table);
+ if (table_types[i].name)
+ pr_cont(" %s=0x%lx ",
+ table_types[i].name, table);
return 1;
}
}
@@ -620,16 +623,12 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
*/
u64 __weak efi_mem_attributes(unsigned long phys_addr)
{
- struct efi_memory_map *map;
efi_memory_desc_t *md;
- void *p;
if (!efi_enabled(EFI_MEMMAP))
return 0;
- map = efi.memmap;
- for (p = map->map; p < map->map_end; p += map->desc_size) {
- md = p;
+ for_each_efi_memory_desc(md) {
if ((md->phys_addr <= phys_addr) &&
(phys_addr < (md->phys_addr +
(md->num_pages << EFI_PAGE_SHIFT))))
@@ -637,3 +636,36 @@ u64 __weak efi_mem_attributes(unsigned long phys_addr)
}
return 0;
}
+
+int efi_status_to_err(efi_status_t status)
+{
+ int err;
+
+ switch (status) {
+ case EFI_SUCCESS:
+ err = 0;
+ break;
+ case EFI_INVALID_PARAMETER:
+ err = -EINVAL;
+ break;
+ case EFI_OUT_OF_RESOURCES:
+ err = -ENOSPC;
+ break;
+ case EFI_DEVICE_ERROR:
+ err = -EIO;
+ break;
+ case EFI_WRITE_PROTECTED:
+ err = -EROFS;
+ break;
+ case EFI_SECURITY_VIOLATION:
+ err = -EACCES;
+ break;
+ case EFI_NOT_FOUND:
+ err = -ENOENT;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
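A small kernel-context sketch (the helper name is invented) of the for_each_efi_memory_desc() iteration style that the efi_mem_attributes() conversion above switches to:

#include <linux/efi.h>

static u64 efi_pages_of_type(int type)
{
	efi_memory_desc_t *md;
	u64 pages = 0;

	for_each_efi_memory_desc(md) {
		if (md->type == type)
			pages += md->num_pages;
	}
	return pages;
}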
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
new file mode 100644
index 000000000000..8dd0c7085e59
--- /dev/null
+++ b/drivers/firmware/efi/efibc.c
@@ -0,0 +1,113 @@
+/*
+ * efibc: control EFI bootloaders which obey LoaderEntryOneShot var
+ * Copyright (c) 2013-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#define pr_fmt(fmt) "efibc: " fmt
+
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+static void efibc_str_to_str16(const char *str, efi_char16_t *str16)
+{
+ size_t i;
+
+ for (i = 0; i < strlen(str); i++)
+ str16[i] = str[i];
+
+ str16[i] = '\0';
+}
+
+static int efibc_set_variable(const char *name, const char *value)
+{
+ int ret;
+ efi_guid_t guid = LINUX_EFI_LOADER_ENTRY_GUID;
+ struct efivar_entry *entry;
+ size_t size = (strlen(value) + 1) * sizeof(efi_char16_t);
+
+ if (size > sizeof(entry->var.Data)) {
+ pr_err("value is too large");
+ return -EINVAL;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ pr_err("failed to allocate efivar entry");
+ return -ENOMEM;
+ }
+
+ efibc_str_to_str16(name, entry->var.VariableName);
+ efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data);
+ memcpy(&entry->var.VendorGuid, &guid, sizeof(guid));
+
+ ret = efivar_entry_set(entry,
+ EFI_VARIABLE_NON_VOLATILE
+ | EFI_VARIABLE_BOOTSERVICE_ACCESS
+ | EFI_VARIABLE_RUNTIME_ACCESS,
+ size, entry->var.Data, NULL);
+ if (ret)
+ pr_err("failed to set %s EFI variable: 0x%x\n",
+ name, ret);
+
+ kfree(entry);
+ return ret;
+}
+
+static int efibc_reboot_notifier_call(struct notifier_block *notifier,
+ unsigned long event, void *data)
+{
+ const char *reason = "shutdown";
+ int ret;
+
+ if (event == SYS_RESTART)
+ reason = "reboot";
+
+ ret = efibc_set_variable("LoaderEntryRebootReason", reason);
+ if (ret || !data)
+ return NOTIFY_DONE;
+
+ efibc_set_variable("LoaderEntryOneShot", (char *)data);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block efibc_reboot_notifier = {
+ .notifier_call = efibc_reboot_notifier_call,
+};
+
+static int __init efibc_init(void)
+{
+ int ret;
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return -ENODEV;
+
+ ret = register_reboot_notifier(&efibc_reboot_notifier);
+ if (ret)
+ pr_err("unable to register reboot notifier\n");
+
+ return ret;
+}
+module_init(efibc_init);
+
+static void __exit efibc_exit(void)
+{
+ unregister_reboot_notifier(&efibc_reboot_notifier);
+}
+module_exit(efibc_exit);
+
+MODULE_AUTHOR("Jeremy Compostella <jeremy.compostella@intel.com>");
+MODULE_AUTHOR("Matt Gumbel <matthew.k.gumbel@intel.com");
+MODULE_DESCRIPTION("EFI Bootloader Control");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 096adcbcb5a9..116b244dee68 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -661,7 +661,7 @@ static void efivar_update_sysfs_entries(struct work_struct *work)
return;
err = efivar_init(efivar_update_sysfs_entry, entry,
- true, false, &efivar_sysfs_list);
+ false, &efivar_sysfs_list);
if (!err)
break;
@@ -730,8 +730,7 @@ int efivars_sysfs_init(void)
return -ENOMEM;
}
- efivar_init(efivars_sysfs_callback, NULL, false,
- true, &efivar_sysfs_list);
+ efivar_init(efivars_sysfs_callback, NULL, true, &efivar_sysfs_list);
error = create_efivars_bin_attributes();
if (error) {
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index ed3a854950cc..48430aba13c1 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -57,7 +57,7 @@ static int __init cmp_fake_mem(const void *x1, const void *x2)
void __init efi_fake_memmap(void)
{
u64 start, end, m_start, m_end, m_attr;
- int new_nr_map = memmap.nr_map;
+ int new_nr_map = efi.memmap.nr_map;
efi_memory_desc_t *md;
phys_addr_t new_memmap_phy;
void *new_memmap;
@@ -68,8 +68,7 @@ void __init efi_fake_memmap(void)
return;
/* count up the number of EFI memory descriptor */
- for (old = memmap.map; old < memmap.map_end; old += memmap.desc_size) {
- md = old;
+ for_each_efi_memory_desc(md) {
start = md->phys_addr;
end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
@@ -95,25 +94,25 @@ void __init efi_fake_memmap(void)
}
/* allocate memory for new EFI memmap */
- new_memmap_phy = memblock_alloc(memmap.desc_size * new_nr_map,
+ new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map,
PAGE_SIZE);
if (!new_memmap_phy)
return;
/* create new EFI memmap */
new_memmap = early_memremap(new_memmap_phy,
- memmap.desc_size * new_nr_map);
+ efi.memmap.desc_size * new_nr_map);
if (!new_memmap) {
- memblock_free(new_memmap_phy, memmap.desc_size * new_nr_map);
+ memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map);
return;
}
- for (old = memmap.map, new = new_memmap;
- old < memmap.map_end;
- old += memmap.desc_size, new += memmap.desc_size) {
+ for (old = efi.memmap.map, new = new_memmap;
+ old < efi.memmap.map_end;
+ old += efi.memmap.desc_size, new += efi.memmap.desc_size) {
/* copy original EFI memory descriptor */
- memcpy(new, old, memmap.desc_size);
+ memcpy(new, old, efi.memmap.desc_size);
md = new;
start = md->phys_addr;
end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
@@ -134,8 +133,8 @@ void __init efi_fake_memmap(void)
md->num_pages = (m_end - md->phys_addr + 1) >>
EFI_PAGE_SHIFT;
/* latter part */
- new += memmap.desc_size;
- memcpy(new, old, memmap.desc_size);
+ new += efi.memmap.desc_size;
+ memcpy(new, old, efi.memmap.desc_size);
md = new;
md->phys_addr = m_end + 1;
md->num_pages = (end - md->phys_addr + 1) >>
@@ -147,16 +146,16 @@ void __init efi_fake_memmap(void)
md->num_pages = (m_start - md->phys_addr) >>
EFI_PAGE_SHIFT;
/* middle part */
- new += memmap.desc_size;
- memcpy(new, old, memmap.desc_size);
+ new += efi.memmap.desc_size;
+ memcpy(new, old, efi.memmap.desc_size);
md = new;
md->attribute |= m_attr;
md->phys_addr = m_start;
md->num_pages = (m_end - m_start + 1) >>
EFI_PAGE_SHIFT;
/* last part */
- new += memmap.desc_size;
- memcpy(new, old, memmap.desc_size);
+ new += efi.memmap.desc_size;
+ memcpy(new, old, efi.memmap.desc_size);
md = new;
md->phys_addr = m_end + 1;
md->num_pages = (end - m_end) >>
@@ -169,8 +168,8 @@ void __init efi_fake_memmap(void)
md->num_pages = (m_start - md->phys_addr) >>
EFI_PAGE_SHIFT;
/* latter part */
- new += memmap.desc_size;
- memcpy(new, old, memmap.desc_size);
+ new += efi.memmap.desc_size;
+ memcpy(new, old, efi.memmap.desc_size);
md = new;
md->phys_addr = m_start;
md->num_pages = (end - md->phys_addr + 1) >>
@@ -182,10 +181,10 @@ void __init efi_fake_memmap(void)
/* swap into new EFI memmap */
efi_unmap_memmap();
- memmap.map = new_memmap;
- memmap.phys_map = new_memmap_phy;
- memmap.nr_map = new_nr_map;
- memmap.map_end = memmap.map + memmap.nr_map * memmap.desc_size;
+ efi.memmap.map = new_memmap;
+ efi.memmap.phys_map = new_memmap_phy;
+ efi.memmap.nr_map = new_nr_map;
+ efi.memmap.map_end = efi.memmap.map + efi.memmap.nr_map * efi.memmap.desc_size;
set_bit(EFI_MEMMAP, &efi.flags);
/* print new EFI memmap */
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index da99bbb74aeb..c06945160a41 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -28,7 +28,7 @@ OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
-lib-y := efi-stub-helper.o
+lib-y := efi-stub-helper.o gop.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 414deb85c2e5..993aa56755f6 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -20,27 +20,49 @@
bool __nokaslr;
-static int efi_secureboot_enabled(efi_system_table_t *sys_table_arg)
+static int efi_get_secureboot(efi_system_table_t *sys_table_arg)
{
- static efi_guid_t const var_guid = EFI_GLOBAL_VARIABLE_GUID;
- static efi_char16_t const var_name[] = {
+ static efi_char16_t const sb_var_name[] = {
'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 };
+ static efi_char16_t const sm_var_name[] = {
+ 'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0 };
+ efi_guid_t var_guid = EFI_GLOBAL_VARIABLE_GUID;
efi_get_variable_t *f_getvar = sys_table_arg->runtime->get_variable;
- unsigned long size = sizeof(u8);
- efi_status_t status;
u8 val;
+ unsigned long size = sizeof(val);
+ efi_status_t status;
- status = f_getvar((efi_char16_t *)var_name, (efi_guid_t *)&var_guid,
+ status = f_getvar((efi_char16_t *)sb_var_name, (efi_guid_t *)&var_guid,
NULL, &size, &val);
+ if (status != EFI_SUCCESS)
+ goto out_efi_err;
+
+ if (val == 0)
+ return 0;
+
+ status = f_getvar((efi_char16_t *)sm_var_name, (efi_guid_t *)&var_guid,
+ NULL, &size, &val);
+
+ if (status != EFI_SUCCESS)
+ goto out_efi_err;
+
+ if (val == 1)
+ return 0;
+
+ return 1;
+
+out_efi_err:
switch (status) {
- case EFI_SUCCESS:
- return val;
case EFI_NOT_FOUND:
return 0;
+ case EFI_DEVICE_ERROR:
+ return -EIO;
+ case EFI_SECURITY_VIOLATION:
+ return -EACCES;
default:
- return 1;
+ return -EINVAL;
}
}
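
For illustration, the decision encoded by efi_get_secureboot() above — Secure Boot is only treated as enforced when the SecureBoot variable reads 1 and SetupMode reads 0, with everything else mapping to "disabled" or an errno — can be sketched as a small host-side C program; read_secure_boot() and read_setup_mode() below are hypothetical stand-ins for the two GetVariable() calls:

#include <stdio.h>

/* Hypothetical stand-ins for the firmware variable reads above. */
static int read_secure_boot(void) { return 1; }  /* value of "SecureBoot" */
static int read_setup_mode(void)  { return 0; }  /* value of "SetupMode"  */

/* Returns 1 if Secure Boot is in force, 0 otherwise. */
static int secure_boot_enabled(void)
{
    if (read_secure_boot() == 0)
        return 0;   /* SecureBoot == 0: disabled */
    if (read_setup_mode() == 1)
        return 0;   /* SetupMode == 1: not enforced */
    return 1;       /* SecureBoot == 1 && SetupMode == 0 */
}

int main(void)
{
    printf("secure boot: %d\n", secure_boot_enabled());
    return 0;
}
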
@@ -147,6 +169,25 @@ void efi_char16_printk(efi_system_table_t *sys_table_arg,
out->output_string(out, str);
}
+static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg)
+{
+ efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
+ efi_status_t status;
+ unsigned long size;
+ void **gop_handle = NULL;
+ struct screen_info *si = NULL;
+
+ size = 0;
+ status = efi_call_early(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &gop_proto, NULL, &size, gop_handle);
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ si = alloc_screen_info(sys_table_arg);
+ if (!si)
+ return NULL;
+ efi_setup_gop(sys_table_arg, si, &gop_proto, size);
+ }
+ return si;
+}
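
setup_graphics() relies on the usual UEFI sizing idiom: the first locate_handle call is made with size == 0, the firmware answers EFI_BUFFER_TOO_SMALL and reports the required size, and efi_setup_gop() then allocates a buffer of that size and repeats the call. A minimal sketch of the idiom under assumed, simplified status codes (query() below is a hypothetical service, not a real EFI call):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MY_BUFFER_TOO_SMALL 1   /* stand-in for EFI_BUFFER_TOO_SMALL */
#define MY_SUCCESS          0   /* stand-in for EFI_SUCCESS */

/* Hypothetical service: reports the needed size; copies data only if the buffer fits. */
static int query(void *buf, size_t *size)
{
    static const char payload[] = "handle-list";

    if (*size < sizeof(payload)) {
        *size = sizeof(payload);
        return MY_BUFFER_TOO_SMALL;
    }
    memcpy(buf, payload, sizeof(payload));
    return MY_SUCCESS;
}

int main(void)
{
    size_t size = 0;
    void *buf;

    /* First call: learn the required buffer size. */
    if (query(NULL, &size) != MY_BUFFER_TOO_SMALL)
        return 1;

    buf = malloc(size);
    if (!buf)
        return 1;

    /* Second call: the buffer is now large enough. */
    if (query(buf, &size) == MY_SUCCESS)
        printf("got %zu bytes\n", size);

    free(buf);
    return 0;
}
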
/*
* This function handles the architecture specific differences between arm and
@@ -185,6 +226,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID;
unsigned long reserve_addr = 0;
unsigned long reserve_size = 0;
+ int secure_boot = 0;
+ struct screen_info *si;
/* Check if we were booted by the EFI firmware */
if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
@@ -237,6 +280,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
__nokaslr = true;
}
+ si = setup_graphics(sys_table);
+
status = handle_kernel_image(sys_table, image_addr, &image_size,
&reserve_addr,
&reserve_size,
@@ -250,12 +295,21 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (status != EFI_SUCCESS)
pr_efi_err(sys_table, "Failed to parse EFI cmdline options\n");
+ secure_boot = efi_get_secureboot(sys_table);
+ if (secure_boot > 0)
+ pr_efi(sys_table, "UEFI Secure Boot is enabled.\n");
+
+ if (secure_boot < 0) {
+ pr_efi_err(sys_table,
+ "could not determine UEFI Secure Boot status.\n");
+ }
+
/*
* Unauthenticated device tree data is a security hazard, so
* ignore 'dtb=' unless UEFI Secure Boot is disabled.
*/
- if (efi_secureboot_enabled(sys_table)) {
- pr_efi(sys_table, "UEFI Secure Boot is enabled.\n");
+ if (secure_boot != 0 && strstr(cmdline_ptr, "dtb=")) {
+ pr_efi(sys_table, "Ignoring DTB from command line.\n");
} else {
status = handle_cmdline_files(sys_table, image, cmdline_ptr,
"dtb=",
@@ -309,6 +363,7 @@ fail_free_image:
efi_free(sys_table, image_size, *image_addr);
efi_free(sys_table, reserve_size, reserve_addr);
fail_free_cmdline:
+ free_screen_info(sys_table, si);
efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
fail:
return EFI_ERROR;
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 6f42be4d0084..e1f0b28e1dcb 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -26,6 +26,43 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
return EFI_SUCCESS;
}
+static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID;
+
+struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg)
+{
+ struct screen_info *si;
+ efi_status_t status;
+
+ /*
+ * Unlike on arm64, where we can directly fill out the screen_info
+ * structure from the stub, we need to allocate a buffer to hold
+ * its contents while we hand over to the kernel proper from the
+ * decompressor.
+ */
+ status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+ sizeof(*si), (void **)&si);
+
+ if (status != EFI_SUCCESS)
+ return NULL;
+
+ status = efi_call_early(install_configuration_table,
+ &screen_info_guid, si);
+ if (status == EFI_SUCCESS)
+ return si;
+
+ efi_call_early(free_pool, si);
+ return NULL;
+}
+
+void free_screen_info(efi_system_table_t *sys_table_arg, struct screen_info *si)
+{
+ if (!si)
+ return;
+
+ efi_call_early(install_configuration_table, &screen_info_guid, NULL);
+ efi_call_early(free_pool, si);
+}
+
efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
unsigned long *image_addr,
unsigned long *image_size,
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index a90f6459f5c6..eae693eb3e91 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -81,15 +81,24 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
/*
+ * If CONFIG_DEBUG_ALIGN_RODATA is not set, produce a
+ * displacement in the interval [0, MIN_KIMG_ALIGN) that
+ * is a multiple of the minimal segment alignment (SZ_64K)
+ */
+ u32 mask = (MIN_KIMG_ALIGN - 1) & ~(SZ_64K - 1);
+ u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ?
+ (phys_seed >> 32) & mask : TEXT_OFFSET;
+
+ /*
* If KASLR is enabled, and we have some randomness available,
* locate the kernel at a randomized offset in physical memory.
*/
- *reserve_size = kernel_memsize + TEXT_OFFSET;
+ *reserve_size = kernel_memsize + offset;
status = efi_random_alloc(sys_table_arg, *reserve_size,
MIN_KIMG_ALIGN, reserve_addr,
- phys_seed);
+ (u32)phys_seed);
- *image_addr = *reserve_addr + TEXT_OFFSET;
+ *image_addr = *reserve_addr + offset;
} else {
/*
* Else, try a straight allocation at the preferred offset.
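
As a worked example of the displacement arithmetic above — assuming, for illustration, MIN_KIMG_ALIGN of 2 MiB together with the stated 64 KiB segment alignment — the mask keeps only bits 16..20 of the upper half of the seed, so the offset is one of 0, 0x10000, ..., 0x1f0000:

#include <stdio.h>
#include <stdint.h>

#define SZ_64K          0x10000UL
#define MIN_KIMG_ALIGN  0x200000UL      /* assumed 2 MiB for illustration */

int main(void)
{
    uint64_t phys_seed = 0xdeadbeefcafef00dULL;      /* example seed */
    uint32_t mask = (MIN_KIMG_ALIGN - 1) & ~(SZ_64K - 1);
    uint32_t offset = (uint32_t)(phys_seed >> 32) & mask;

    /* mask == 0x1f0000: offset is a multiple of 64 KiB below 2 MiB */
    printf("mask=0x%x offset=0x%x\n", mask, offset);
    return 0;
}
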
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 29ed2f9b218c..3bd127f95315 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -125,10 +125,12 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
map.map_end = map.map + map_size;
- for_each_efi_memory_desc(&map, md)
- if (md->attribute & EFI_MEMORY_WB)
+ for_each_efi_memory_desc_in_map(&map, md) {
+ if (md->attribute & EFI_MEMORY_WB) {
if (membase > md->phys_addr)
membase = md->phys_addr;
+ }
+ }
efi_call_early(free_pool, map.map);
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 6dba78aef337..e58abfa953cc 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -24,7 +24,7 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
unsigned long map_size, unsigned long desc_size,
u32 desc_ver)
{
- int node, prev, num_rsv;
+ int node, num_rsv;
int status;
u32 fdt_val32;
u64 fdt_val64;
@@ -54,28 +54,6 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
goto fdt_set_fail;
/*
- * Delete any memory nodes present. We must delete nodes which
- * early_init_dt_scan_memory may try to use.
- */
- prev = 0;
- for (;;) {
- const char *type;
- int len;
-
- node = fdt_next_node(fdt, prev, NULL);
- if (node < 0)
- break;
-
- type = fdt_getprop(fdt, node, "device_type", &len);
- if (type && strncmp(type, "memory", len) == 0) {
- fdt_del_node(fdt, node);
- continue;
- }
-
- prev = node;
- }
-
- /*
* Delete all memory reserve map entries. When booting via UEFI,
* kernel will use the UEFI memory map to find reserved regions.
*/
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
new file mode 100644
index 000000000000..932742e4cf23
--- /dev/null
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -0,0 +1,354 @@
+/* -----------------------------------------------------------------------
+ *
+ * Copyright 2011 Intel Corporation; author Matt Fleming
+ *
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
+ *
+ * ----------------------------------------------------------------------- */
+
+#include <linux/efi.h>
+#include <linux/screen_info.h>
+#include <asm/efi.h>
+#include <asm/setup.h>
+
+static void find_bits(unsigned long mask, u8 *pos, u8 *size)
+{
+ u8 first, len;
+
+ first = 0;
+ len = 0;
+
+ if (mask) {
+ while (!(mask & 0x1)) {
+ mask = mask >> 1;
+ first++;
+ }
+
+ while (mask & 0x1) {
+ mask = mask >> 1;
+ len++;
+ }
+ }
+
+ *pos = first;
+ *size = len;
+}
+
+static void
+setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
+ struct efi_pixel_bitmask pixel_info, int pixel_format)
+{
+ if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
+ si->lfb_depth = 32;
+ si->lfb_linelength = pixels_per_scan_line * 4;
+ si->red_size = 8;
+ si->red_pos = 0;
+ si->green_size = 8;
+ si->green_pos = 8;
+ si->blue_size = 8;
+ si->blue_pos = 16;
+ si->rsvd_size = 8;
+ si->rsvd_pos = 24;
+ } else if (pixel_format == PIXEL_BGR_RESERVED_8BIT_PER_COLOR) {
+ si->lfb_depth = 32;
+ si->lfb_linelength = pixels_per_scan_line * 4;
+ si->red_size = 8;
+ si->red_pos = 16;
+ si->green_size = 8;
+ si->green_pos = 8;
+ si->blue_size = 8;
+ si->blue_pos = 0;
+ si->rsvd_size = 8;
+ si->rsvd_pos = 24;
+ } else if (pixel_format == PIXEL_BIT_MASK) {
+ find_bits(pixel_info.red_mask, &si->red_pos, &si->red_size);
+ find_bits(pixel_info.green_mask, &si->green_pos,
+ &si->green_size);
+ find_bits(pixel_info.blue_mask, &si->blue_pos, &si->blue_size);
+ find_bits(pixel_info.reserved_mask, &si->rsvd_pos,
+ &si->rsvd_size);
+ si->lfb_depth = si->red_size + si->green_size +
+ si->blue_size + si->rsvd_size;
+ si->lfb_linelength = (pixels_per_scan_line * si->lfb_depth) / 8;
+ } else {
+ si->lfb_depth = 4;
+ si->lfb_linelength = si->lfb_width / 2;
+ si->red_size = 0;
+ si->red_pos = 0;
+ si->green_size = 0;
+ si->green_pos = 0;
+ si->blue_size = 0;
+ si->blue_pos = 0;
+ si->rsvd_size = 0;
+ si->rsvd_pos = 0;
+ }
+}
+
+static efi_status_t
+__gop_query32(efi_system_table_t *sys_table_arg,
+ struct efi_graphics_output_protocol_32 *gop32,
+ struct efi_graphics_output_mode_info **info,
+ unsigned long *size, u64 *fb_base)
+{
+ struct efi_graphics_output_protocol_mode_32 *mode;
+ efi_graphics_output_protocol_query_mode query_mode;
+ efi_status_t status;
+ unsigned long m;
+
+ m = gop32->mode;
+ mode = (struct efi_graphics_output_protocol_mode_32 *)m;
+ query_mode = (void *)(unsigned long)gop32->query_mode;
+
+ status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size,
+ info);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *fb_base = mode->frame_buffer_base;
+ return status;
+}
+
+static efi_status_t
+setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
+ efi_guid_t *proto, unsigned long size, void **gop_handle)
+{
+ struct efi_graphics_output_protocol_32 *gop32, *first_gop;
+ unsigned long nr_gops;
+ u16 width, height;
+ u32 pixels_per_scan_line;
+ u32 ext_lfb_base;
+ u64 fb_base;
+ struct efi_pixel_bitmask pixel_info;
+ int pixel_format;
+ efi_status_t status = EFI_NOT_FOUND;
+ u32 *handles = (u32 *)(unsigned long)gop_handle;
+ int i;
+
+ first_gop = NULL;
+ gop32 = NULL;
+
+ nr_gops = size / sizeof(u32);
+ for (i = 0; i < nr_gops; i++) {
+ struct efi_graphics_output_mode_info *info = NULL;
+ efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+ bool conout_found = false;
+ void *dummy = NULL;
+ efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
+ u64 current_fb_base;
+
+ status = efi_call_early(handle_protocol, h,
+ proto, (void **)&gop32);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ status = efi_call_early(handle_protocol, h,
+ &conout_proto, &dummy);
+ if (status == EFI_SUCCESS)
+ conout_found = true;
+
+ status = __gop_query32(sys_table_arg, gop32, &info, &size,
+ &current_fb_base);
+ if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+ /*
+ * Systems that use the UEFI Console Splitter may
+ * provide multiple GOP devices, not all of which are
+ * backed by real hardware. The workaround is to search
+ * for a GOP implementing the ConOut protocol, and if
+ * one isn't found, to just fall back to the first GOP.
+ */
+ width = info->horizontal_resolution;
+ height = info->vertical_resolution;
+ pixel_format = info->pixel_format;
+ pixel_info = info->pixel_information;
+ pixels_per_scan_line = info->pixels_per_scan_line;
+ fb_base = current_fb_base;
+
+ /*
+ * Once we've found a GOP supporting ConOut,
+ * don't bother looking any further.
+ */
+ first_gop = gop32;
+ if (conout_found)
+ break;
+ }
+ }
+
+ /* Did we find any GOPs? */
+ if (!first_gop)
+ goto out;
+
+ /* EFI framebuffer */
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+ si->lfb_width = width;
+ si->lfb_height = height;
+ si->lfb_base = fb_base;
+
+ ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
+ if (ext_lfb_base) {
+ si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+ si->ext_lfb_base = ext_lfb_base;
+ }
+
+ si->pages = 1;
+
+ setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
+
+ si->lfb_size = si->lfb_linelength * si->lfb_height;
+
+ si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+out:
+ return status;
+}
+
+static efi_status_t
+__gop_query64(efi_system_table_t *sys_table_arg,
+ struct efi_graphics_output_protocol_64 *gop64,
+ struct efi_graphics_output_mode_info **info,
+ unsigned long *size, u64 *fb_base)
+{
+ struct efi_graphics_output_protocol_mode_64 *mode;
+ efi_graphics_output_protocol_query_mode query_mode;
+ efi_status_t status;
+ unsigned long m;
+
+ m = gop64->mode;
+ mode = (struct efi_graphics_output_protocol_mode_64 *)m;
+ query_mode = (void *)(unsigned long)gop64->query_mode;
+
+ status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size,
+ info);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *fb_base = mode->frame_buffer_base;
+ return status;
+}
+
+static efi_status_t
+setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
+ efi_guid_t *proto, unsigned long size, void **gop_handle)
+{
+ struct efi_graphics_output_protocol_64 *gop64, *first_gop;
+ unsigned long nr_gops;
+ u16 width, height;
+ u32 pixels_per_scan_line;
+ u32 ext_lfb_base;
+ u64 fb_base;
+ struct efi_pixel_bitmask pixel_info;
+ int pixel_format;
+ efi_status_t status = EFI_NOT_FOUND;
+ u64 *handles = (u64 *)(unsigned long)gop_handle;
+ int i;
+
+ first_gop = NULL;
+ gop64 = NULL;
+
+ nr_gops = size / sizeof(u64);
+ for (i = 0; i < nr_gops; i++) {
+ struct efi_graphics_output_mode_info *info = NULL;
+ efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+ bool conout_found = false;
+ void *dummy = NULL;
+ efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
+ u64 current_fb_base;
+
+ status = efi_call_early(handle_protocol, h,
+ proto, (void **)&gop64);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ status = efi_call_early(handle_protocol, h,
+ &conout_proto, &dummy);
+ if (status == EFI_SUCCESS)
+ conout_found = true;
+
+ status = __gop_query64(sys_table_arg, gop64, &info, &size,
+ &current_fb_base);
+ if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+ /*
+ * Systems that use the UEFI Console Splitter may
+ * provide multiple GOP devices, not all of which are
+ * backed by real hardware. The workaround is to search
+ * for a GOP implementing the ConOut protocol, and if
+ * one isn't found, to just fall back to the first GOP.
+ */
+ width = info->horizontal_resolution;
+ height = info->vertical_resolution;
+ pixel_format = info->pixel_format;
+ pixel_info = info->pixel_information;
+ pixels_per_scan_line = info->pixels_per_scan_line;
+ fb_base = current_fb_base;
+
+ /*
+ * Once we've found a GOP supporting ConOut,
+ * don't bother looking any further.
+ */
+ first_gop = gop64;
+ if (conout_found)
+ break;
+ }
+ }
+
+ /* Did we find any GOPs? */
+ if (!first_gop)
+ goto out;
+
+ /* EFI framebuffer */
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+ si->lfb_width = width;
+ si->lfb_height = height;
+ si->lfb_base = fb_base;
+
+ ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
+ if (ext_lfb_base) {
+ si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+ si->ext_lfb_base = ext_lfb_base;
+ }
+
+ si->pages = 1;
+
+ setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
+
+ si->lfb_size = si->lfb_linelength * si->lfb_height;
+
+ si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+out:
+ return status;
+}
+
+/*
+ * See if we have Graphics Output Protocol
+ */
+efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
+ struct screen_info *si, efi_guid_t *proto,
+ unsigned long size)
+{
+ efi_status_t status;
+ void **gop_handle = NULL;
+
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ size, (void **)&gop_handle);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_call_early(locate_handle,
+ EFI_LOCATE_BY_PROTOCOL,
+ proto, NULL, &size, gop_handle);
+ if (status != EFI_SUCCESS)
+ goto free_handle;
+
+ if (efi_is_64bit()) {
+ status = setup_gop64(sys_table_arg, si, proto, size,
+ gop_handle);
+ } else {
+ status = setup_gop32(sys_table_arg, si, proto, size,
+ gop_handle);
+ }
+
+free_handle:
+ efi_call_early(free_pool, gop_handle);
+ return status;
+}
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
new file mode 100644
index 000000000000..236004b9a50d
--- /dev/null
+++ b/drivers/firmware/efi/memattr.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "efi: memattr: " fmt
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#include <asm/early_ioremap.h>
+
+static int __initdata tbl_size;
+
+/*
+ * Reserve the memory associated with the Memory Attributes configuration
+ * table, if it exists.
+ */
+int __init efi_memattr_init(void)
+{
+ efi_memory_attributes_table_t *tbl;
+
+ if (efi.mem_attr_table == EFI_INVALID_TABLE_ADDR)
+ return 0;
+
+ tbl = early_memremap(efi.mem_attr_table, sizeof(*tbl));
+ if (!tbl) {
+ pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+ efi.mem_attr_table);
+ return -ENOMEM;
+ }
+
+ if (tbl->version > 1) {
+ pr_warn("Unexpected EFI Memory Attributes table version %d\n",
+ tbl->version);
+ goto unmap;
+ }
+
+ tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
+ memblock_reserve(efi.mem_attr_table, tbl_size);
+
+unmap:
+ early_memunmap(tbl, sizeof(*tbl));
+ return 0;
+}
+
+/*
+ * Returns a copy @out of the UEFI memory descriptor @in if it is covered
+ * entirely by a UEFI memory map entry with matching attributes. The virtual
+ * address of @out is set according to the matching entry that was found.
+ */
+static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
+{
+ u64 in_paddr = in->phys_addr;
+ u64 in_size = in->num_pages << EFI_PAGE_SHIFT;
+ efi_memory_desc_t *md;
+
+ *out = *in;
+
+ if (in->type != EFI_RUNTIME_SERVICES_CODE &&
+ in->type != EFI_RUNTIME_SERVICES_DATA) {
+ pr_warn("Entry type should be RuntimeServiceCode/Data\n");
+ return false;
+ }
+
+ if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
+ pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
+ return false;
+ }
+
+ if (PAGE_SIZE > EFI_PAGE_SIZE &&
+ (!PAGE_ALIGNED(in->phys_addr) ||
+ !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
+ /*
+ * Since arm64 may execute with page sizes of up to 64 KB, the
+ * UEFI spec mandates that RuntimeServices memory regions must
+ * be 64 KB aligned. We need to validate this here since we will
+ * not be able to tighten permissions on such regions without
+ * affecting adjacent regions.
+ */
+ pr_warn("Entry address region misaligned\n");
+ return false;
+ }
+
+ for_each_efi_memory_desc(md) {
+ u64 md_paddr = md->phys_addr;
+ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0) {
+ /* no virtual mapping has been installed by the stub */
+ break;
+ }
+
+ if (md_paddr > in_paddr || (in_paddr - md_paddr) >= md_size)
+ continue;
+
+ /*
+ * This entry covers the start of @in, check whether
+ * it covers the end as well.
+ */
+ if (md_paddr + md_size < in_paddr + in_size) {
+ pr_warn("Entry covers multiple EFI memory map regions\n");
+ return false;
+ }
+
+ if (md->type != in->type) {
+ pr_warn("Entry type deviates from EFI memory map region type\n");
+ return false;
+ }
+
+ out->virt_addr = in_paddr + (md->virt_addr - md_paddr);
+
+ return true;
+ }
+
+ pr_warn("No matching entry found in the EFI memory map\n");
+ return false;
+}
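
The loop above boils down to an interval-containment check: the Memory Attributes entry [phys_addr, phys_addr + size) must lie entirely inside a single EFI memory map region. A standalone sketch of the same comparison, with made-up addresses:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* True if [start, start + size) lies entirely within [r_start, r_start + r_size). */
static bool covered_by(uint64_t start, uint64_t size,
                       uint64_t r_start, uint64_t r_size)
{
    if (r_start > start || (start - r_start) >= r_size)
        return false;                   /* region misses the start */
    if (r_start + r_size < start + size)
        return false;                   /* region ends too early */
    return true;
}

int main(void)
{
    /* Example: a 64 KiB entry inside a 1 MiB runtime region. */
    printf("%d\n", covered_by(0x40010000ULL, 0x10000ULL,
                              0x40000000ULL, 0x100000ULL));
    return 0;
}
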
+
+/*
+ * To be called after the EFI page tables have been populated. If a memory
+ * attributes table is available, its contents will be used to update the
+ * mappings with tightened permissions as described by the table.
+ * This requires the UEFI memory map to have already been populated with
+ * virtual addresses.
+ */
+int __init efi_memattr_apply_permissions(struct mm_struct *mm,
+ efi_memattr_perm_setter fn)
+{
+ efi_memory_attributes_table_t *tbl;
+ int i, ret;
+
+ if (tbl_size <= sizeof(*tbl))
+ return 0;
+
+ /*
+ * We need the EFI memory map to be set up so we can use it to
+ * look up the virtual addresses of all entries of the EFI
+ * Memory Attributes table. If it isn't available, this
+ * function should not be called.
+ */
+ if (WARN_ON(!efi_enabled(EFI_MEMMAP)))
+ return 0;
+
+ tbl = memremap(efi.mem_attr_table, tbl_size, MEMREMAP_WB);
+ if (!tbl) {
+ pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+ efi.mem_attr_table);
+ return -ENOMEM;
+ }
+
+ if (efi_enabled(EFI_DBG))
+ pr_info("Processing EFI Memory Attributes table:\n");
+
+ for (i = ret = 0; ret == 0 && i < tbl->num_entries; i++) {
+ efi_memory_desc_t md;
+ unsigned long size;
+ bool valid;
+ char buf[64];
+
+ valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size,
+ &md);
+ size = md.num_pages << EFI_PAGE_SHIFT;
+ if (efi_enabled(EFI_DBG) || !valid)
+ pr_info("%s 0x%012llx-0x%012llx %s\n",
+ valid ? "" : "!", md.phys_addr,
+ md.phys_addr + size - 1,
+ efi_md_typeattr_format(buf, sizeof(buf), &md));
+
+ if (valid)
+ ret = fn(mm, &md);
+ }
+ memunmap(tbl);
+ return ret;
+}
diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c
index 9c59d1c795d1..62ead9b9d871 100644
--- a/drivers/firmware/efi/reboot.c
+++ b/drivers/firmware/efi/reboot.c
@@ -9,7 +9,8 @@ int efi_reboot_quirk_mode = -1;
void efi_reboot(enum reboot_mode reboot_mode, const char *__unused)
{
- int efi_mode;
+ const char *str[] = { "cold", "warm", "shutdown", "platform" };
+ int efi_mode, cap_reset_mode;
if (!efi_enabled(EFI_RUNTIME_SERVICES))
return;
@@ -30,6 +31,15 @@ void efi_reboot(enum reboot_mode reboot_mode, const char *__unused)
if (efi_reboot_quirk_mode != -1)
efi_mode = efi_reboot_quirk_mode;
+ if (efi_capsule_pending(&cap_reset_mode)) {
+ if (efi_mode != cap_reset_mode)
+ printk(KERN_CRIT "efi: %s reset requested but pending "
+ "capsule update requires %s reset... Performing "
+ "%s reset.\n", str[efi_mode], str[cap_reset_mode],
+ str[cap_reset_mode]);
+ efi_mode = cap_reset_mode;
+ }
+
efi.reset_system(efi_mode, EFI_SUCCESS, 0, NULL);
}
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index de6953039af6..23bef6bb73ee 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -16,10 +16,70 @@
#include <linux/bug.h>
#include <linux/efi.h>
+#include <linux/irqflags.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/stringify.h>
#include <asm/efi.h>
+static void efi_call_virt_check_flags(unsigned long flags, const char *call)
+{
+ unsigned long cur_flags, mismatch;
+
+ local_save_flags(cur_flags);
+
+ mismatch = flags ^ cur_flags;
+ if (!WARN_ON_ONCE(mismatch & ARCH_EFI_IRQ_FLAGS_MASK))
+ return;
+
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_NOW_UNRELIABLE);
+ pr_err_ratelimited(FW_BUG "IRQ flags corrupted (0x%08lx=>0x%08lx) by EFI %s\n",
+ flags, cur_flags, call);
+ local_irq_restore(flags);
+}
+
+/*
+ * Arch code can implement the following three template macros, avoiding
+ * repetition for the void/non-void return cases of {__,}efi_call_virt:
+ *
+ * * arch_efi_call_virt_setup
+ *
+ * Sets up the environment for the call (e.g. switching page tables,
+ * allowing kernel-mode use of floating point, if required).
+ *
+ * * arch_efi_call_virt
+ *
+ * Performs the call. The last expression in the macro must be the call
+ * itself, allowing the logic to be shared by the void and non-void
+ * cases.
+ *
+ * * arch_efi_call_virt_teardown
+ *
+ * Restores the usual kernel environment once the call has returned.
+ */
+
+#define efi_call_virt(f, args...) \
+({ \
+ efi_status_t __s; \
+ unsigned long flags; \
+ arch_efi_call_virt_setup(); \
+ local_save_flags(flags); \
+ __s = arch_efi_call_virt(f, args); \
+ efi_call_virt_check_flags(flags, __stringify(f)); \
+ arch_efi_call_virt_teardown(); \
+ __s; \
+})
+
+#define __efi_call_virt(f, args...) \
+({ \
+ unsigned long flags; \
+ arch_efi_call_virt_setup(); \
+ local_save_flags(flags); \
+ arch_efi_call_virt(f, args); \
+ efi_call_virt_check_flags(flags, __stringify(f)); \
+ arch_efi_call_virt_teardown(); \
+})
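
For a hypothetical architecture that needs no page-table switch or FPU handling, the three template macros described above could be as simple as the sketch below; the fake_runtime table merely stands in for the firmware's runtime services so the example is self-contained and compiles with GCC:

#include <stdio.h>

/* Hypothetical runtime-services table standing in for the firmware's. */
struct fake_runtime {
    int (*get_time)(int *hour);
};

static int fake_get_time(int *hour) { *hour = 12; return 0; }
static struct fake_runtime runtime = { .get_time = fake_get_time };

/* Trivial template macros for an arch with no special requirements. */
#define arch_efi_call_virt_setup()      do { } while (0)
#define arch_efi_call_virt_teardown()   do { } while (0)
/* The call is the last expression, so the same macro serves both the
 * void and non-void wrappers built on top of it. */
#define arch_efi_call_virt(f, args...)  runtime.f(args)

#define my_call_virt(f, args...)                \
({                                              \
    int __s;                                    \
    arch_efi_call_virt_setup();                 \
    __s = arch_efi_call_virt(f, args);          \
    arch_efi_call_virt_teardown();              \
    __s;                                        \
})

int main(void)
{
    int hour;

    if (my_call_virt(get_time, &hour) == 0)
        printf("hour=%d\n", hour);
    return 0;
}
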
+
/*
* According to section 7.1 of the UEFI spec, Runtime Services are not fully
* reentrant, and there are particular combinations of calls that need to be
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 34b741940494..d3b751383286 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -329,39 +329,6 @@ check_var_size_nonblocking(u32 attributes, unsigned long size)
return fops->query_variable_store(attributes, size, true);
}
-static int efi_status_to_err(efi_status_t status)
-{
- int err;
-
- switch (status) {
- case EFI_SUCCESS:
- err = 0;
- break;
- case EFI_INVALID_PARAMETER:
- err = -EINVAL;
- break;
- case EFI_OUT_OF_RESOURCES:
- err = -ENOSPC;
- break;
- case EFI_DEVICE_ERROR:
- err = -EIO;
- break;
- case EFI_WRITE_PROTECTED:
- err = -EROFS;
- break;
- case EFI_SECURITY_VIOLATION:
- err = -EACCES;
- break;
- case EFI_NOT_FOUND:
- err = -ENOENT;
- break;
- default:
- err = -EINVAL;
- }
-
- return err;
-}
-
static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor,
struct list_head *head)
{
@@ -452,8 +419,7 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
* Returns 0 on success, or a kernel error code on failure.
*/
int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
- void *data, bool atomic, bool duplicates,
- struct list_head *head)
+ void *data, bool duplicates, struct list_head *head)
{
const struct efivar_operations *ops = __efivars->ops;
unsigned long variable_name_size = 1024;
@@ -483,7 +449,7 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
&vendor_guid);
switch (status) {
case EFI_SUCCESS:
- if (!atomic)
+ if (duplicates)
spin_unlock_irq(&__efivars->lock);
variable_name_size = var_name_strnsize(variable_name,
@@ -498,21 +464,19 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
* and may end up looping here forever.
*/
if (duplicates &&
- variable_is_present(variable_name, &vendor_guid, head)) {
+ variable_is_present(variable_name, &vendor_guid,
+ head)) {
dup_variable_bug(variable_name, &vendor_guid,
variable_name_size);
- if (!atomic)
- spin_lock_irq(&__efivars->lock);
-
status = EFI_NOT_FOUND;
- break;
+ } else {
+ err = func(variable_name, vendor_guid,
+ variable_name_size, data);
+ if (err)
+ status = EFI_NOT_FOUND;
}
- err = func(variable_name, vendor_guid, variable_name_size, data);
- if (err)
- status = EFI_NOT_FOUND;
-
- if (!atomic)
+ if (duplicates)
spin_lock_irq(&__efivars->lock);
break;
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index b5d05807e6ec..fa4ea22ca12e 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -355,7 +355,7 @@ int psci_cpu_suspend_enter(unsigned long index)
/* ARM specific CPU idle operations */
#ifdef CONFIG_ARM
-static struct cpuidle_ops psci_cpuidle_ops __initdata = {
+static const struct cpuidle_ops psci_cpuidle_ops __initconst = {
.suspend = psci_cpu_suspend_enter,
.init = psci_dt_cpu_init_idle,
};
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 6743ff7dccfa..059f7c39c582 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -72,7 +72,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{
#if defined(CONFIG_X86)
- if (cpu_has_clflush) {
+ if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
drm_cache_flush_clflush(pages, num_pages);
return;
}
@@ -105,7 +105,7 @@ void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
- if (cpu_has_clflush) {
+ if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
struct sg_page_iter sg_iter;
mb();
@@ -129,7 +129,7 @@ void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
- if (cpu_has_clflush) {
+ if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
const int size = boot_cpu_data.x86_clflush_size;
void *end = addr + length;
addr = (void *)(((unsigned long)addr) & -size);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dabc08987b5e..f2cb9a9539ee 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1732,7 +1732,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (args->flags & ~(I915_MMAP_WC))
return -EINVAL;
- if (args->flags & I915_MMAP_WC && !cpu_has_pat)
+ if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
return -ENODEV;
obj = drm_gem_object_lookup(dev, file, args->handle);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1328bc5021b4..b845f468dd74 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -488,7 +488,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
ret = relocate_entry_cpu(obj, reloc, target_offset);
else if (obj->map_and_fenceable)
ret = relocate_entry_gtt(obj, reloc, target_offset);
- else if (cpu_has_clflush)
+ else if (static_cpu_has(X86_FEATURE_CLFLUSH))
ret = relocate_entry_clflush(obj, reloc, target_offset);
else {
WARN_ONCE(1, "Impossible case in relocation handling\n");
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 5c2d13a687aa..ff940075bb90 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -288,7 +288,7 @@ config SENSORS_K10TEMP
config SENSORS_FAM15H_POWER
tristate "AMD Family 15h processor power"
- depends on X86 && PCI
+ depends on X86 && PCI && CPU_SUP_AMD
help
If you say yes here you get support for processor power
information of your AMD family 15h CPU.
@@ -621,7 +621,8 @@ config SENSORS_IT87
If you say yes here you get support for ITE IT8705F, IT8712F, IT8716F,
IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8732F, IT8758E,
IT8771E, IT8772E, IT8781F, IT8782F, IT8783E/F, IT8786E, IT8790E,
- IT8603E, IT8620E, and IT8623E sensor chips, and the SiS950 clone.
+ IT8603E, IT8620E, IT8623E, and IT8628E sensor chips, and the SiS950
+ clone.
This driver can also be built as a module. If so, the module
will be called it87.
@@ -821,6 +822,16 @@ config SENSORS_MAX197
This driver can also be built as a module. If so, the module
will be called max197.
+config SENSORS_MAX31722
+ tristate "MAX31722 temperature sensor"
+ depends on SPI
+ help
+ Support for the Maxim Integrated MAX31722/MAX31723 digital
+ thermometers/thermostats operating over an SPI interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called max31722.
+
config SENSORS_MAX6639
tristate "Maxim MAX6639 sensor chip"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 58cc3acba7e7..2ef5b7c4c54f 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -112,6 +112,7 @@ obj-$(CONFIG_SENSORS_MAX16065) += max16065.o
obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX197) += max197.o
+obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 6c99ee7bafa3..ee396ff167d9 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -120,6 +120,7 @@ static int ads7828_probe(struct i2c_client *client,
unsigned int vref_mv = ADS7828_INT_VREF_MV;
bool diff_input = false;
bool ext_vref = false;
+ unsigned int regval;
data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL);
if (!data)
@@ -154,6 +155,15 @@ static int ads7828_probe(struct i2c_client *client,
if (!diff_input)
data->cmd_byte |= ADS7828_CMD_SD_SE;
+ /*
+ * Datasheet specifies internal reference voltage is disabled by
+ * default. The internal reference voltage needs to be enabled and
+ * voltage needs to settle before getting valid ADC data. So perform a
+ * dummy read to enable the internal reference voltage.
+ */
+ if (!ext_vref)
+ regmap_read(data->regmap, data->cmd_byte, &regval);
+
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data,
ads7828_groups);
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 4f695d8fcafa..eb97a9241d17 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -1,7 +1,7 @@
/*
* fam15h_power.c - AMD Family 15h processor power monitoring
*
- * Copyright (c) 2011 Advanced Micro Devices, Inc.
+ * Copyright (c) 2011-2016 Advanced Micro Devices, Inc.
* Author: Andreas Herrmann <herrmann.der.user@googlemail.com>
*
*
@@ -25,6 +25,10 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/time.h>
+#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/msr.h>
@@ -44,8 +48,14 @@ MODULE_LICENSE("GPL");
#define FAM15H_MIN_NUM_ATTRS 2
#define FAM15H_NUM_GROUPS 2
+#define MAX_CUS 8
+/* set maximum interval as 1 second */
+#define MAX_INTERVAL 1000
+
+#define MSR_F15H_CU_PWR_ACCUMULATOR 0xc001007a
#define MSR_F15H_CU_MAX_PWR_ACCUMULATOR 0xc001007b
+#define MSR_F15H_PTSC 0xc0010280
#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F4 0x15b4
@@ -59,8 +69,20 @@ struct fam15h_power_data {
struct attribute_group group;
/* maximum accumulated power of a compute unit */
u64 max_cu_acc_power;
+ /* accumulated power of the compute units */
+ u64 cu_acc_power[MAX_CUS];
+ /* performance timestamp counter */
+ u64 cpu_sw_pwr_ptsc[MAX_CUS];
+ /* online/offline status of current compute unit */
+ int cu_on[MAX_CUS];
+ unsigned long power_period;
};
+static bool is_carrizo_or_later(void)
+{
+ return boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60;
+}
+
static ssize_t show_power(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -77,7 +99,7 @@ static ssize_t show_power(struct device *dev,
* On Carrizo and later platforms, TdpRunAvgAccCap bit field
* is extended to 4:31 from 4:25.
*/
- if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60) {
+ if (is_carrizo_or_later()) {
running_avg_capture = val >> 4;
running_avg_capture = sign_extend32(running_avg_capture, 27);
} else {
@@ -94,7 +116,7 @@ static ssize_t show_power(struct device *dev,
* On Carrizo and later platforms, ApmTdpLimit bit field
* is extended to 16:31 from 16:28.
*/
- if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60)
+ if (is_carrizo_or_later())
tdp_limit = val >> 16;
else
tdp_limit = (val >> 16) & 0x1fff;
@@ -125,6 +147,167 @@ static ssize_t show_power_crit(struct device *dev,
}
static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL);
+static void do_read_registers_on_cu(void *_data)
+{
+ struct fam15h_power_data *data = _data;
+ int cpu, cu;
+
+ cpu = smp_processor_id();
+
+ /*
+ * With the new x86 topology modelling, cpu core id actually
+ * is compute unit id.
+ */
+ cu = cpu_data(cpu).cpu_core_id;
+
+ rdmsrl_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]);
+ rdmsrl_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]);
+
+ data->cu_on[cu] = 1;
+}
+
+/*
+ * This function is only able to be called when CPUID
+ * Fn8000_0007:EDX[12] is set.
+ */
+static int read_registers(struct fam15h_power_data *data)
+{
+ int this_cpu, ret, cpu;
+ int core, this_core;
+ cpumask_var_t mask;
+
+ ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
+ if (!ret)
+ return -ENOMEM;
+
+ memset(data->cu_on, 0, sizeof(int) * MAX_CUS);
+
+ get_online_cpus();
+ this_cpu = smp_processor_id();
+
+ /*
+ * Choose the first online core of each compute unit, and then
+ * read their MSR value of power and ptsc in a single IPI,
+ * because the MSR value of a CPU core represents the compute
+ * unit's.
+ */
+ core = -1;
+
+ for_each_online_cpu(cpu) {
+ this_core = topology_core_id(cpu);
+
+ if (this_core == core)
+ continue;
+
+ core = this_core;
+
+ /* get any CPU on this compute unit */
+ cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask);
+ }
+
+ if (cpumask_test_cpu(this_cpu, mask))
+ do_read_registers_on_cu(data);
+
+ smp_call_function_many(mask, do_read_registers_on_cu, data, true);
+ put_online_cpus();
+
+ free_cpumask_var(mask);
+
+ return 0;
+}
+
+static ssize_t acc_show_power(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fam15h_power_data *data = dev_get_drvdata(dev);
+ u64 prev_cu_acc_power[MAX_CUS], prev_ptsc[MAX_CUS],
+ jdelta[MAX_CUS];
+ u64 tdelta, avg_acc;
+ int cu, cu_num, ret;
+ signed long leftover;
+
+ /*
+ * With the new x86 topology modelling, x86_max_cores is the
+ * compute unit number.
+ */
+ cu_num = boot_cpu_data.x86_max_cores;
+
+ ret = read_registers(data);
+ if (ret)
+ return 0;
+
+ for (cu = 0; cu < cu_num; cu++) {
+ prev_cu_acc_power[cu] = data->cu_acc_power[cu];
+ prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu];
+ }
+
+ leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period));
+ if (leftover)
+ return 0;
+
+ ret = read_registers(data);
+ if (ret)
+ return 0;
+
+ for (cu = 0, avg_acc = 0; cu < cu_num; cu++) {
+ /* check if current compute unit is online */
+ if (data->cu_on[cu] == 0)
+ continue;
+
+ if (data->cu_acc_power[cu] < prev_cu_acc_power[cu]) {
+ jdelta[cu] = data->max_cu_acc_power + data->cu_acc_power[cu];
+ jdelta[cu] -= prev_cu_acc_power[cu];
+ } else {
+ jdelta[cu] = data->cu_acc_power[cu] - prev_cu_acc_power[cu];
+ }
+ tdelta = data->cpu_sw_pwr_ptsc[cu] - prev_ptsc[cu];
+ jdelta[cu] *= data->cpu_pwr_sample_ratio * 1000;
+ do_div(jdelta[cu], tdelta);
+
+ /* the unit is microWatt */
+ avg_acc += jdelta[cu];
+ }
+
+ return sprintf(buf, "%llu\n", (unsigned long long)avg_acc);
+}
+static DEVICE_ATTR(power1_average, S_IRUGO, acc_show_power, NULL);
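
The average reported above is (energy delta) * cpu_pwr_sample_ratio * 1000 / (PTSC delta) per compute unit, with the energy delta corrected for accumulator wrap-around against max_cu_acc_power. A host-side sketch of that arithmetic with made-up sample values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t max_acc = 1ULL << 32;          /* assumed accumulator range */
    uint64_t prev_acc = 0xfffffff0ULL;      /* sample 1 */
    uint64_t cur_acc = 0x00000140ULL;       /* sample 2, wrapped around */
    uint64_t prev_ptsc = 1000000, cur_ptsc = 3000000;
    uint64_t ratio = 10;                    /* cpu_pwr_sample_ratio */
    uint64_t jdelta, tdelta;

    /* Handle accumulator wrap-around, as the driver does. */
    if (cur_acc < prev_acc)
        jdelta = max_acc + cur_acc - prev_acc;
    else
        jdelta = cur_acc - prev_acc;

    tdelta = cur_ptsc - prev_ptsc;
    jdelta = jdelta * ratio * 1000 / tdelta;        /* microwatts */

    printf("average power: %llu uW\n", (unsigned long long)jdelta);
    return 0;
}
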
+
+static ssize_t acc_show_power_period(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fam15h_power_data *data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lu\n", data->power_period);
+}
+
+static ssize_t acc_set_power_period(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fam15h_power_data *data = dev_get_drvdata(dev);
+ unsigned long temp;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &temp);
+ if (ret)
+ return ret;
+
+ if (temp > MAX_INTERVAL)
+ return -EINVAL;
+
+ /* the interval value should be greater than 0 */
+ if (temp <= 0)
+ return -EINVAL;
+
+ data->power_period = temp;
+
+ return count;
+}
+static DEVICE_ATTR(power1_average_interval, S_IRUGO | S_IWUSR,
+ acc_show_power_period, acc_set_power_period);
+
static int fam15h_power_init_attrs(struct pci_dev *pdev,
struct fam15h_power_data *data)
{
@@ -137,6 +320,10 @@ static int fam15h_power_init_attrs(struct pci_dev *pdev,
(c->x86_model >= 0x60 && c->x86_model <= 0x7f)))
n += 1;
+ /* check if processor supports accumulated power */
+ if (boot_cpu_has(X86_FEATURE_ACC_POWER))
+ n += 2;
+
fam15h_power_attrs = devm_kcalloc(&pdev->dev, n,
sizeof(*fam15h_power_attrs),
GFP_KERNEL);
@@ -151,6 +338,11 @@ static int fam15h_power_init_attrs(struct pci_dev *pdev,
(c->x86_model >= 0x60 && c->x86_model <= 0x7f)))
fam15h_power_attrs[n++] = &dev_attr_power1_input.attr;
+ if (boot_cpu_has(X86_FEATURE_ACC_POWER)) {
+ fam15h_power_attrs[n++] = &dev_attr_power1_average.attr;
+ fam15h_power_attrs[n++] = &dev_attr_power1_average_interval.attr;
+ }
+
data->group.attrs = fam15h_power_attrs;
return 0;
@@ -216,7 +408,7 @@ static int fam15h_power_resume(struct pci_dev *pdev)
static int fam15h_power_init_data(struct pci_dev *f4,
struct fam15h_power_data *data)
{
- u32 val, eax, ebx, ecx, edx;
+ u32 val;
u64 tmp;
int ret;
@@ -243,10 +435,9 @@ static int fam15h_power_init_data(struct pci_dev *f4,
if (ret)
return ret;
- cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
/* CPUID Fn8000_0007:EDX[12] indicates to support accumulated power */
- if (!(edx & BIT(12)))
+ if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
return 0;
/*
@@ -254,7 +445,7 @@ static int fam15h_power_init_data(struct pci_dev *f4,
* sample period to the PTSC counter period by executing CPUID
* Fn8000_0007:ECX
*/
- data->cpu_pwr_sample_ratio = ecx;
+ data->cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);
if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
pr_err("Failed to read max compute unit power accumulator MSR\n");
@@ -263,7 +454,15 @@ static int fam15h_power_init_data(struct pci_dev *f4,
data->max_cu_acc_power = tmp;
- return 0;
+ /*
+ * Milliseconds are a reasonable interval for the measurement.
+ * But it shouldn't be set too long here, because several seconds
+ * would cause the read function to hang. So set the default
+ * interval to 10 ms.
+ */
+ data->power_period = 10;
+
+ return read_registers(data);
}
static int fam15h_power_probe(struct pci_dev *pdev,
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 1896e26df634..730d84028260 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -13,6 +13,7 @@
* Supports: IT8603E Super I/O chip w/LPC interface
* IT8620E Super I/O chip w/LPC interface
* IT8623E Super I/O chip w/LPC interface
+ * IT8628E Super I/O chip w/LPC interface
* IT8705F Super I/O chip w/LPC interface
* IT8712F Super I/O chip w/LPC interface
* IT8716F Super I/O chip w/LPC interface
@@ -44,14 +45,11 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -72,17 +70,18 @@
enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8732,
it8771, it8772, it8781, it8782, it8783, it8786, it8790, it8603,
- it8620 };
+ it8620, it8628 };
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-static struct platform_device *pdev;
+static struct platform_device *it87_pdev[2];
+
+#define REG_2E 0x2e /* The register to read/write */
+#define REG_4E 0x4e /* Secondary register to read/write */
-#define REG 0x2e /* The register to read/write */
#define DEV 0x07 /* Register: Logical device select */
-#define VAL 0x2f /* The value to read/write */
#define PME 0x04 /* The device with the fan registers in it */
/* The device with the IT8718F/IT8720F VID value in it */
@@ -91,54 +90,55 @@ static struct platform_device *pdev;
#define DEVID 0x20 /* Register: Device ID */
#define DEVREV 0x22 /* Register: Device Revision */
-static inline int superio_inb(int reg)
+static inline int superio_inb(int ioreg, int reg)
{
- outb(reg, REG);
- return inb(VAL);
+ outb(reg, ioreg);
+ return inb(ioreg + 1);
}
-static inline void superio_outb(int reg, int val)
+static inline void superio_outb(int ioreg, int reg, int val)
{
- outb(reg, REG);
- outb(val, VAL);
+ outb(reg, ioreg);
+ outb(val, ioreg + 1);
}
-static int superio_inw(int reg)
+static int superio_inw(int ioreg, int reg)
{
int val;
- outb(reg++, REG);
- val = inb(VAL) << 8;
- outb(reg, REG);
- val |= inb(VAL);
+
+ outb(reg++, ioreg);
+ val = inb(ioreg + 1) << 8;
+ outb(reg, ioreg);
+ val |= inb(ioreg + 1);
return val;
}
-static inline void superio_select(int ldn)
+static inline void superio_select(int ioreg, int ldn)
{
- outb(DEV, REG);
- outb(ldn, VAL);
+ outb(DEV, ioreg);
+ outb(ldn, ioreg + 1);
}
-static inline int superio_enter(void)
+static inline int superio_enter(int ioreg)
{
/*
- * Try to reserve REG and REG + 1 for exclusive access.
+ * Try to reserve ioreg and ioreg + 1 for exclusive access.
*/
- if (!request_muxed_region(REG, 2, DRVNAME))
+ if (!request_muxed_region(ioreg, 2, DRVNAME))
return -EBUSY;
- outb(0x87, REG);
- outb(0x01, REG);
- outb(0x55, REG);
- outb(0x55, REG);
+ outb(0x87, ioreg);
+ outb(0x01, ioreg);
+ outb(0x55, ioreg);
+ outb(ioreg == REG_4E ? 0xaa : 0x55, ioreg);
return 0;
}
-static inline void superio_exit(void)
+static inline void superio_exit(int ioreg)
{
- outb(0x02, REG);
- outb(0x02, VAL);
- release_region(REG, 2);
+ outb(0x02, ioreg);
+ outb(0x02, ioreg + 1);
+ release_region(ioreg, 2);
}
/* Logical device 4 registers */
@@ -161,6 +161,7 @@ static inline void superio_exit(void)
#define IT8603E_DEVID 0x8603
#define IT8620E_DEVID 0x8620
#define IT8623E_DEVID 0x8623
+#define IT8628E_DEVID 0x8628
#define IT87_ACT_REG 0x30
#define IT87_BASE_REG 0x60
@@ -168,6 +169,7 @@ static inline void superio_exit(void)
#define IT87_SIO_GPIO1_REG 0x25
#define IT87_SIO_GPIO2_REG 0x26
#define IT87_SIO_GPIO3_REG 0x27
+#define IT87_SIO_GPIO4_REG 0x28
#define IT87_SIO_GPIO5_REG 0x29
#define IT87_SIO_PINX1_REG 0x2a /* Pin selection */
#define IT87_SIO_PINX2_REG 0x2c /* Pin selection */
@@ -217,7 +219,12 @@ static bool fix_pwm_polarity;
#define IT87_REG_FAN_DIV 0x0b
#define IT87_REG_FAN_16BIT 0x0c
-/* Monitors: 9 voltage (0 to 7, battery), 3 temp (1 to 3), 3 fan (1 to 3) */
+/*
+ * Monitors:
+ * - up to 13 voltage (0 to 7, battery, avcc, 10 to 12)
+ * - up to 6 temp (1 to 6)
+ * - up to 6 fan (1 to 6)
+ */
static const u8 IT87_REG_FAN[] = { 0x0d, 0x0e, 0x0f, 0x80, 0x82, 0x4c };
static const u8 IT87_REG_FAN_MIN[] = { 0x10, 0x11, 0x12, 0x84, 0x86, 0x4e };
@@ -227,10 +234,12 @@ static const u8 IT87_REG_TEMP_OFFSET[] = { 0x56, 0x57, 0x59 };
#define IT87_REG_FAN_MAIN_CTRL 0x13
#define IT87_REG_FAN_CTL 0x14
-#define IT87_REG_PWM(nr) (0x15 + (nr))
-#define IT87_REG_PWM_DUTY(nr) (0x63 + (nr) * 8)
+static const u8 IT87_REG_PWM[] = { 0x15, 0x16, 0x17, 0x7f, 0xa7, 0xaf };
+static const u8 IT87_REG_PWM_DUTY[] = { 0x63, 0x6b, 0x73, 0x7b, 0xa3, 0xab };
+
+static const u8 IT87_REG_VIN[] = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26,
+ 0x27, 0x28, 0x2f, 0x2c, 0x2d, 0x2e };
-#define IT87_REG_VIN(nr) (0x20 + (nr))
#define IT87_REG_TEMP(nr) (0x29 + (nr))
#define IT87_REG_VIN_MAX(nr) (0x30 + (nr) * 2)
@@ -245,30 +254,48 @@ static const u8 IT87_REG_TEMP_OFFSET[] = { 0x56, 0x57, 0x59 };
#define IT87_REG_CHIPID 0x58
-#define IT87_REG_AUTO_TEMP(nr, i) (0x60 + (nr) * 8 + (i))
-#define IT87_REG_AUTO_PWM(nr, i) (0x65 + (nr) * 8 + (i))
+static const u8 IT87_REG_AUTO_BASE[] = { 0x60, 0x68, 0x70, 0x78, 0xa0, 0xa8 };
+
+#define IT87_REG_AUTO_TEMP(nr, i) (IT87_REG_AUTO_BASE[nr] + (i))
+#define IT87_REG_AUTO_PWM(nr, i) (IT87_REG_AUTO_BASE[nr] + 5 + (i))
+
+#define IT87_REG_TEMP456_ENABLE 0x77
+
+#define NUM_VIN ARRAY_SIZE(IT87_REG_VIN)
+#define NUM_VIN_LIMIT 8
+#define NUM_TEMP 6
+#define NUM_TEMP_OFFSET ARRAY_SIZE(IT87_REG_TEMP_OFFSET)
+#define NUM_TEMP_LIMIT 3
+#define NUM_FAN ARRAY_SIZE(IT87_REG_FAN)
+#define NUM_FAN_DIV 3
+#define NUM_PWM ARRAY_SIZE(IT87_REG_PWM)
+#define NUM_AUTO_PWM ARRAY_SIZE(IT87_REG_PWM)
struct it87_devices {
const char *name;
const char * const suffix;
- u16 features;
+ u32 features;
u8 peci_mask;
u8 old_peci_mask;
};
-#define FEAT_12MV_ADC (1 << 0)
-#define FEAT_NEWER_AUTOPWM (1 << 1)
-#define FEAT_OLD_AUTOPWM (1 << 2)
-#define FEAT_16BIT_FANS (1 << 3)
-#define FEAT_TEMP_OFFSET (1 << 4)
-#define FEAT_TEMP_PECI (1 << 5)
-#define FEAT_TEMP_OLD_PECI (1 << 6)
-#define FEAT_FAN16_CONFIG (1 << 7) /* Need to enable 16-bit fans */
-#define FEAT_FIVE_FANS (1 << 8) /* Supports five fans */
-#define FEAT_VID (1 << 9) /* Set if chip supports VID */
-#define FEAT_IN7_INTERNAL (1 << 10) /* Set if in7 is internal */
-#define FEAT_SIX_FANS (1 << 11) /* Supports six fans */
-#define FEAT_10_9MV_ADC (1 << 12)
+#define FEAT_12MV_ADC BIT(0)
+#define FEAT_NEWER_AUTOPWM BIT(1)
+#define FEAT_OLD_AUTOPWM BIT(2)
+#define FEAT_16BIT_FANS BIT(3)
+#define FEAT_TEMP_OFFSET BIT(4)
+#define FEAT_TEMP_PECI BIT(5)
+#define FEAT_TEMP_OLD_PECI BIT(6)
+#define FEAT_FAN16_CONFIG BIT(7) /* Need to enable 16-bit fans */
+#define FEAT_FIVE_FANS BIT(8) /* Supports five fans */
+#define FEAT_VID BIT(9) /* Set if chip supports VID */
+#define FEAT_IN7_INTERNAL BIT(10) /* Set if in7 is internal */
+#define FEAT_SIX_FANS BIT(11) /* Supports six fans */
+#define FEAT_10_9MV_ADC BIT(12)
+#define FEAT_AVCC3 BIT(13) /* Chip supports in9/AVCC3 */
+#define FEAT_SIX_PWM BIT(14) /* Chip supports 6 pwm chn */
+#define FEAT_PWM_FREQ2 BIT(15) /* Separate pwm freq 2 */
+#define FEAT_SIX_TEMP BIT(16) /* Up to 6 temp sensors */
static const struct it87_devices it87_devices[] = {
[it87] = {
@@ -286,20 +313,22 @@ static const struct it87_devices it87_devices[] = {
.name = "it8716",
.suffix = "F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET | FEAT_VID
- | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS,
+ | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS | FEAT_PWM_FREQ2,
},
[it8718] = {
.name = "it8718",
.suffix = "F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET | FEAT_VID
- | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS,
+ | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS
+ | FEAT_PWM_FREQ2,
.old_peci_mask = 0x4,
},
[it8720] = {
.name = "it8720",
.suffix = "F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET | FEAT_VID
- | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS,
+ | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS
+ | FEAT_PWM_FREQ2,
.old_peci_mask = 0x4,
},
[it8721] = {
@@ -307,7 +336,8 @@ static const struct it87_devices it87_devices[] = {
.suffix = "F",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI
- | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS | FEAT_IN7_INTERNAL,
+ | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS | FEAT_IN7_INTERNAL
+ | FEAT_PWM_FREQ2,
.peci_mask = 0x05,
.old_peci_mask = 0x02, /* Actually reports PCH */
},
@@ -316,7 +346,7 @@ static const struct it87_devices it87_devices[] = {
.suffix = "F",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_FIVE_FANS
- | FEAT_IN7_INTERNAL,
+ | FEAT_IN7_INTERNAL | FEAT_PWM_FREQ2,
.peci_mask = 0x07,
},
[it8732] = {
@@ -332,7 +362,8 @@ static const struct it87_devices it87_devices[] = {
.name = "it8771",
.suffix = "E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
- | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL,
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
+ | FEAT_PWM_FREQ2,
/* PECI: guesswork */
/* 12mV ADC (OHM) */
/* 16 bit fans (OHM) */
@@ -343,7 +374,8 @@ static const struct it87_devices it87_devices[] = {
.name = "it8772",
.suffix = "E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
- | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL,
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
+ | FEAT_PWM_FREQ2,
/* PECI (coreboot) */
/* 12mV ADC (HWSensors4, OHM) */
/* 16 bit fans (HWSensors4, OHM) */
@@ -354,42 +386,45 @@ static const struct it87_devices it87_devices[] = {
.name = "it8781",
.suffix = "F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
- | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG,
+ | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_PWM_FREQ2,
.old_peci_mask = 0x4,
},
[it8782] = {
.name = "it8782",
.suffix = "F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
- | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG,
+ | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_PWM_FREQ2,
.old_peci_mask = 0x4,
},
[it8783] = {
.name = "it8783",
.suffix = "E/F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
- | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG,
+ | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_PWM_FREQ2,
.old_peci_mask = 0x4,
},
[it8786] = {
.name = "it8786",
.suffix = "E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
- | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL,
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
+ | FEAT_PWM_FREQ2,
.peci_mask = 0x07,
},
[it8790] = {
.name = "it8790",
.suffix = "E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
- | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL,
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
+ | FEAT_PWM_FREQ2,
.peci_mask = 0x07,
},
[it8603] = {
.name = "it8603",
.suffix = "E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
- | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL,
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
+ | FEAT_AVCC3 | FEAT_PWM_FREQ2,
.peci_mask = 0x07,
},
[it8620] = {
@@ -397,7 +432,17 @@ static const struct it87_devices it87_devices[] = {
.suffix = "E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_SIX_FANS
- | FEAT_IN7_INTERNAL,
+ | FEAT_IN7_INTERNAL | FEAT_SIX_PWM | FEAT_PWM_FREQ2
+ | FEAT_SIX_TEMP,
+ .peci_mask = 0x07,
+ },
+ [it8628] = {
+ .name = "it8628",
+ .suffix = "E",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_SIX_FANS
+ | FEAT_IN7_INTERNAL | FEAT_SIX_PWM | FEAT_PWM_FREQ2
+ | FEAT_SIX_TEMP,
.peci_mask = 0x07,
},
};
@@ -409,16 +454,20 @@ static const struct it87_devices it87_devices[] = {
#define has_old_autopwm(data) ((data)->features & FEAT_OLD_AUTOPWM)
#define has_temp_offset(data) ((data)->features & FEAT_TEMP_OFFSET)
#define has_temp_peci(data, nr) (((data)->features & FEAT_TEMP_PECI) && \
- ((data)->peci_mask & (1 << nr)))
+ ((data)->peci_mask & BIT(nr)))
#define has_temp_old_peci(data, nr) \
(((data)->features & FEAT_TEMP_OLD_PECI) && \
- ((data)->old_peci_mask & (1 << nr)))
+ ((data)->old_peci_mask & BIT(nr)))
#define has_fan16_config(data) ((data)->features & FEAT_FAN16_CONFIG)
#define has_five_fans(data) ((data)->features & (FEAT_FIVE_FANS | \
FEAT_SIX_FANS))
#define has_vid(data) ((data)->features & FEAT_VID)
#define has_in7_internal(data) ((data)->features & FEAT_IN7_INTERNAL)
#define has_six_fans(data) ((data)->features & FEAT_SIX_FANS)
+#define has_avcc3(data) ((data)->features & FEAT_AVCC3)
+#define has_six_pwm(data) ((data)->features & FEAT_SIX_PWM)
+#define has_pwm_freq2(data) ((data)->features & FEAT_PWM_FREQ2)
+#define has_six_temp(data) ((data)->features & FEAT_SIX_TEMP)
struct it87_sio_data {
enum chips type;
@@ -440,7 +489,7 @@ struct it87_sio_data {
* The structure is dynamically allocated.
*/
struct it87_data {
- struct device *hwmon_dev;
+ const struct attribute_group *groups[7];
enum chips type;
u16 features;
u8 peci_mask;
@@ -453,17 +502,21 @@ struct it87_data {
unsigned long last_updated; /* In jiffies */
u16 in_scaled; /* Internal voltage sensors are scaled */
- u8 in[10][3]; /* [nr][0]=in, [1]=min, [2]=max */
+ u16 in_internal; /* Bitfield, internal sensors (for labels) */
+ u16 has_in; /* Bitfield, voltage sensors enabled */
+ u8 in[NUM_VIN][3]; /* [nr][0]=in, [1]=min, [2]=max */
u8 has_fan; /* Bitfield, fans enabled */
- u16 fan[6][2]; /* Register values, [nr][0]=fan, [1]=min */
+ u16 fan[NUM_FAN][2]; /* Register values, [nr][0]=fan, [1]=min */
u8 has_temp; /* Bitfield, temp sensors enabled */
- s8 temp[3][4]; /* [nr][0]=temp, [1]=min, [2]=max, [3]=offset */
+ s8 temp[NUM_TEMP][4]; /* [nr][0]=temp, [1]=min, [2]=max, [3]=offset */
u8 sensor; /* Register value (IT87_REG_TEMP_ENABLE) */
u8 extra; /* Register value (IT87_REG_TEMP_EXTRA) */
- u8 fan_div[3]; /* Register encoding, shifted right */
+ u8 fan_div[NUM_FAN_DIV];/* Register encoding, shifted right */
+ bool has_vid; /* True if VID supported */
u8 vid; /* Register encoding, combined */
u8 vrm;
u32 alarms; /* Register encoding, combined */
+ bool has_beep; /* true if beep supported */
u8 beeps; /* Register encoding */
u8 fan_main_ctrl; /* Register value */
u8 fan_ctl; /* Register value */
@@ -478,13 +531,14 @@ struct it87_data {
* is no longer needed, but it is still done to keep the driver
* simple.
*/
- u8 pwm_ctrl[3]; /* Register value */
- u8 pwm_duty[3]; /* Manual PWM value set by user */
- u8 pwm_temp_map[3]; /* PWM to temp. chan. mapping (bits 1-0) */
+ u8 has_pwm; /* Bitfield, pwm control enabled */
+ u8 pwm_ctrl[NUM_PWM]; /* Register value */
+ u8 pwm_duty[NUM_PWM]; /* Manual PWM value set by user */
+ u8 pwm_temp_map[NUM_PWM];/* PWM to temp. chan. mapping (bits 1-0) */
/* Automatic fan speed control registers */
- u8 auto_pwm[3][4]; /* [nr][3] is hard-coded */
- s8 auto_temp[3][5]; /* [nr][0] is point1_temp_hyst */
+ u8 auto_pwm[NUM_AUTO_PWM][4]; /* [nr][3] is hard-coded */
+ s8 auto_temp[NUM_AUTO_PWM][5]; /* [nr][0] is point1_temp_hyst */
};
static int adc_lsb(const struct it87_data *data, int nr)
@@ -497,7 +551,7 @@ static int adc_lsb(const struct it87_data *data, int nr)
lsb = 109;
else
lsb = 160;
- if (data->in_scaled & (1 << nr))
+ if (data->in_scaled & BIT(nr))
lsb <<= 1;
return lsb;
}
@@ -554,15 +608,16 @@ static int pwm_from_reg(const struct it87_data *data, u8 reg)
return (reg & 0x7f) << 1;
}
-
static int DIV_TO_REG(int val)
{
int answer = 0;
+
while (answer < 7 && (val >>= 1))
answer++;
return answer;
}
-#define DIV_FROM_REG(val) (1 << (val))
+
+#define DIV_FROM_REG(val) BIT(val)
/*
* PWM base frequencies. The frequency has to be divided by either 128 or 256,
@@ -585,32 +640,204 @@ static const unsigned int pwm_freq[8] = {
750000,
};
-static int it87_probe(struct platform_device *pdev);
-static int it87_remove(struct platform_device *pdev);
+/*
+ * Must be called with data->update_lock held, except during initialization.
+ * We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
+ * would slow down the IT87 access and should not be necessary.
+ */
+static int it87_read_value(struct it87_data *data, u8 reg)
+{
+ outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
+ return inb_p(data->addr + IT87_DATA_REG_OFFSET);
+}
-static int it87_read_value(struct it87_data *data, u8 reg);
-static void it87_write_value(struct it87_data *data, u8 reg, u8 value);
-static struct it87_data *it87_update_device(struct device *dev);
-static int it87_check_pwm(struct device *dev);
-static void it87_init_device(struct platform_device *pdev);
+/*
+ * Must be called with data->update_lock held, except during initialization.
+ * We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
+ * would slow down the IT87 access and should not be necessary.
+ */
+static void it87_write_value(struct it87_data *data, u8 reg, u8 value)
+{
+ outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
+ outb_p(value, data->addr + IT87_DATA_REG_OFFSET);
+}
+static void it87_update_pwm_ctrl(struct it87_data *data, int nr)
+{
+ data->pwm_ctrl[nr] = it87_read_value(data, IT87_REG_PWM[nr]);
+ if (has_newer_autopwm(data)) {
+ data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
+ data->pwm_duty[nr] = it87_read_value(data,
+ IT87_REG_PWM_DUTY[nr]);
+ } else {
+ if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */
+ data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
+ else /* Manual mode */
+ data->pwm_duty[nr] = data->pwm_ctrl[nr] & 0x7f;
+ }
-static struct platform_driver it87_driver = {
- .driver = {
- .name = DRVNAME,
- },
- .probe = it87_probe,
- .remove = it87_remove,
-};
+ if (has_old_autopwm(data)) {
+ int i;
+
+ for (i = 0; i < 5 ; i++)
+ data->auto_temp[nr][i] = it87_read_value(data,
+ IT87_REG_AUTO_TEMP(nr, i));
+ for (i = 0; i < 3 ; i++)
+ data->auto_pwm[nr][i] = it87_read_value(data,
+ IT87_REG_AUTO_PWM(nr, i));
+ } else if (has_newer_autopwm(data)) {
+ int i;
+
+ /*
+ * 0: temperature hysteresis (base + 5)
+ * 1: fan off temperature (base + 0)
+ * 2: fan start temperature (base + 1)
+ * 3: fan max temperature (base + 2)
+ */
+ data->auto_temp[nr][0] =
+ it87_read_value(data, IT87_REG_AUTO_TEMP(nr, 5));
+
+ for (i = 0; i < 3 ; i++)
+ data->auto_temp[nr][i + 1] =
+ it87_read_value(data,
+ IT87_REG_AUTO_TEMP(nr, i));
+ /*
+ * 0: start pwm value (base + 3)
+ * 1: pwm slope (base + 4, 1/8th pwm)
+ */
+ data->auto_pwm[nr][0] =
+ it87_read_value(data, IT87_REG_AUTO_TEMP(nr, 3));
+ data->auto_pwm[nr][1] =
+ it87_read_value(data, IT87_REG_AUTO_TEMP(nr, 4));
+ }
+}
+
+static struct it87_data *it87_update_device(struct device *dev)
+{
+ struct it87_data *data = dev_get_drvdata(dev);
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) ||
+ !data->valid) {
+ if (update_vbat) {
+ /*
+ * Cleared after each update, so reenable. Value
+ * returned by this read will be previous value
+ */
+ it87_write_value(data, IT87_REG_CONFIG,
+ it87_read_value(data, IT87_REG_CONFIG) | 0x40);
+ }
+ for (i = 0; i < NUM_VIN; i++) {
+ if (!(data->has_in & BIT(i)))
+ continue;
+
+ data->in[i][0] =
+ it87_read_value(data, IT87_REG_VIN[i]);
+
+ /* VBAT and AVCC don't have limit registers */
+ if (i >= NUM_VIN_LIMIT)
+ continue;
+
+ data->in[i][1] =
+ it87_read_value(data, IT87_REG_VIN_MIN(i));
+ data->in[i][2] =
+ it87_read_value(data, IT87_REG_VIN_MAX(i));
+ }
+
+ for (i = 0; i < NUM_FAN; i++) {
+ /* Skip disabled fans */
+ if (!(data->has_fan & BIT(i)))
+ continue;
+
+ data->fan[i][1] =
+ it87_read_value(data, IT87_REG_FAN_MIN[i]);
+ data->fan[i][0] = it87_read_value(data,
+ IT87_REG_FAN[i]);
+ /* Add high byte if in 16-bit mode */
+ if (has_16bit_fans(data)) {
+ data->fan[i][0] |= it87_read_value(data,
+ IT87_REG_FANX[i]) << 8;
+ data->fan[i][1] |= it87_read_value(data,
+ IT87_REG_FANX_MIN[i]) << 8;
+ }
+ }
+ for (i = 0; i < NUM_TEMP; i++) {
+ if (!(data->has_temp & BIT(i)))
+ continue;
+ data->temp[i][0] =
+ it87_read_value(data, IT87_REG_TEMP(i));
+
+ if (has_temp_offset(data) && i < NUM_TEMP_OFFSET)
+ data->temp[i][3] =
+ it87_read_value(data,
+ IT87_REG_TEMP_OFFSET[i]);
+
+ if (i >= NUM_TEMP_LIMIT)
+ continue;
+
+ data->temp[i][1] =
+ it87_read_value(data, IT87_REG_TEMP_LOW(i));
+ data->temp[i][2] =
+ it87_read_value(data, IT87_REG_TEMP_HIGH(i));
+ }
+
+ /* Newer chips don't have clock dividers */
+ if ((data->has_fan & 0x07) && !has_16bit_fans(data)) {
+ i = it87_read_value(data, IT87_REG_FAN_DIV);
+ data->fan_div[0] = i & 0x07;
+ data->fan_div[1] = (i >> 3) & 0x07;
+ data->fan_div[2] = (i & 0x40) ? 3 : 1;
+ }
+
+ data->alarms =
+ it87_read_value(data, IT87_REG_ALARM1) |
+ (it87_read_value(data, IT87_REG_ALARM2) << 8) |
+ (it87_read_value(data, IT87_REG_ALARM3) << 16);
+ data->beeps = it87_read_value(data, IT87_REG_BEEP_ENABLE);
+
+ data->fan_main_ctrl = it87_read_value(data,
+ IT87_REG_FAN_MAIN_CTRL);
+ data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL);
+ for (i = 0; i < NUM_PWM; i++) {
+ if (!(data->has_pwm & BIT(i)))
+ continue;
+ it87_update_pwm_ctrl(data, i);
+ }
+
+ data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE);
+ data->extra = it87_read_value(data, IT87_REG_TEMP_EXTRA);
+ /*
+ * The IT8705F does not have VID capability.
+ * The IT8718F and later don't use IT87_REG_VID for the
+ * same purpose.
+ */
+ if (data->type == it8712 || data->type == it8716) {
+ data->vid = it87_read_value(data, IT87_REG_VID);
+ /*
+ * The older IT8712F revisions had only 5 VID pins,
+ * but we assume it is always safe to read 6 bits.
+ */
+ data->vid &= 0x3f;
+ }
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
static ssize_t show_in(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- int nr = sattr->nr;
+ struct it87_data *data = it87_update_device(dev);
int index = sattr->index;
+ int nr = sattr->nr;
- struct it87_data *data = it87_update_device(dev);
return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in[nr][index]));
}
@@ -618,10 +845,9 @@ static ssize_t set_in(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- int nr = sattr->nr;
- int index = sattr->index;
-
struct it87_data *data = dev_get_drvdata(dev);
+ int index = sattr->index;
+ int nr = sattr->nr;
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
@@ -687,8 +913,11 @@ static SENSOR_DEVICE_ATTR_2(in7_max, S_IRUGO | S_IWUSR, show_in, set_in,
static SENSOR_DEVICE_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 8, 0);
static SENSOR_DEVICE_ATTR_2(in9_input, S_IRUGO, show_in, NULL, 9, 0);
+static SENSOR_DEVICE_ATTR_2(in10_input, S_IRUGO, show_in, NULL, 10, 0);
+static SENSOR_DEVICE_ATTR_2(in11_input, S_IRUGO, show_in, NULL, 11, 0);
+static SENSOR_DEVICE_ATTR_2(in12_input, S_IRUGO, show_in, NULL, 12, 0);
-/* 3 temperatures */
+/* Up to 6 temperatures */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -761,6 +990,9 @@ static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
2, 2);
static SENSOR_DEVICE_ATTR_2(temp3_offset, S_IRUGO | S_IWUSR, show_temp,
set_temp, 2, 3);
+static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 3, 0);
+static SENSOR_DEVICE_ATTR_2(temp5_input, S_IRUGO, show_temp, NULL, 4, 0);
+static SENSOR_DEVICE_ATTR_2(temp6_input, S_IRUGO, show_temp, NULL, 5, 0);
static ssize_t show_temp_type(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -771,8 +1003,8 @@ static ssize_t show_temp_type(struct device *dev, struct device_attribute *attr,
u8 reg = data->sensor; /* In case value is updated while used */
u8 extra = data->extra;
- if ((has_temp_peci(data, nr) && (reg >> 6 == nr + 1))
- || (has_temp_old_peci(data, nr) && (extra & 0x80)))
+ if ((has_temp_peci(data, nr) && (reg >> 6 == nr + 1)) ||
+ (has_temp_old_peci(data, nr) && (extra & 0x80)))
return sprintf(buf, "6\n"); /* Intel PECI */
if (reg & (1 << nr))
return sprintf(buf, "3\n"); /* thermal diode */
@@ -837,18 +1069,19 @@ static SENSOR_DEVICE_ATTR(temp2_type, S_IRUGO | S_IWUSR, show_temp_type,
static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO | S_IWUSR, show_temp_type,
set_temp_type, 2);
-/* 3 Fans */
+/* 6 Fans */
static int pwm_mode(const struct it87_data *data, int nr)
{
- int ctrl = data->fan_main_ctrl & (1 << nr);
-
- if (ctrl == 0 && data->type != it8603) /* Full speed */
- return 0;
- if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */
- return 2;
- else /* Manual mode */
- return 1;
+ if (data->type != it8603 && nr < 3 && !(data->fan_main_ctrl & BIT(nr)))
+ return 0; /* Full speed */
+ if (data->pwm_ctrl[nr] & 0x80)
+ return 2; /* Automatic mode */
+ if ((data->type == it8603 || nr >= 3) &&
+ data->pwm_duty[nr] == pwm_to_reg(data, 0xff))
+ return 0; /* Full speed */
+
+ return 1; /* Manual mode */
}
static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
@@ -868,39 +1101,49 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
}
static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ struct it87_data *data = it87_update_device(dev);
int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]));
+ return sprintf(buf, "%lu\n", DIV_FROM_REG(data->fan_div[nr]));
}
+
static ssize_t show_pwm_enable(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ struct it87_data *data = it87_update_device(dev);
int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
return sprintf(buf, "%d\n", pwm_mode(data, nr));
}
+
static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ struct it87_data *data = it87_update_device(dev);
int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
return sprintf(buf, "%d\n",
pwm_from_reg(data, data->pwm_duty[nr]));
}
+
static ssize_t show_pwm_freq(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
struct it87_data *data = it87_update_device(dev);
- int index = (data->fan_ctl >> 4) & 0x07;
+ int nr = sensor_attr->index;
unsigned int freq;
+ int index;
+
+ if (has_pwm_freq2(data) && nr == 1)
+ index = (data->extra >> 4) & 0x07;
+ else
+ index = (data->fan_ctl >> 4) & 0x07;
freq = pwm_freq[index] / (has_newer_autopwm(data) ? 256 : 128);
@@ -953,12 +1196,11 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *attr,
}
static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
struct it87_data *data = dev_get_drvdata(dev);
+ int nr = sensor_attr->index;
unsigned long val;
int min;
u8 old;
@@ -1013,6 +1255,11 @@ static int check_trip_points(struct device *dev, int nr)
if (data->auto_pwm[nr][i] > data->auto_pwm[nr][i + 1])
err = -EINVAL;
}
+ } else if (has_newer_autopwm(data)) {
+ for (i = 1; i < 3; i++) {
+ if (data->auto_temp[nr][i] > data->auto_temp[nr][i + 1])
+ err = -EINVAL;
+ }
}
if (err) {
@@ -1023,13 +1270,12 @@ static int check_trip_points(struct device *dev, int nr)
return err;
}
-static ssize_t set_pwm_enable(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
struct it87_data *data = dev_get_drvdata(dev);
+ int nr = sensor_attr->index;
long val;
if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 2)
@@ -1041,21 +1287,30 @@ static ssize_t set_pwm_enable(struct device *dev,
return -EINVAL;
}
- /* IT8603E does not have on/off mode */
- if (val == 0 && data->type == it8603)
- return -EINVAL;
-
mutex_lock(&data->update_lock);
if (val == 0) {
- int tmp;
- /* make sure the fan is on when in on/off mode */
- tmp = it87_read_value(data, IT87_REG_FAN_CTL);
- it87_write_value(data, IT87_REG_FAN_CTL, tmp | (1 << nr));
- /* set on/off mode */
- data->fan_main_ctrl &= ~(1 << nr);
- it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
- data->fan_main_ctrl);
+ if (nr < 3 && data->type != it8603) {
+ int tmp;
+ /* make sure the fan is on when in on/off mode */
+ tmp = it87_read_value(data, IT87_REG_FAN_CTL);
+ it87_write_value(data, IT87_REG_FAN_CTL, tmp | BIT(nr));
+ /* set on/off mode */
+ data->fan_main_ctrl &= ~BIT(nr);
+ it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
+ data->fan_main_ctrl);
+ } else {
+ /* No on/off mode, set maximum pwm value */
+ data->pwm_duty[nr] = pwm_to_reg(data, 0xff);
+ it87_write_value(data, IT87_REG_PWM_DUTY[nr],
+ data->pwm_duty[nr]);
+ /* and set manual mode */
+ data->pwm_ctrl[nr] = has_newer_autopwm(data) ?
+ data->pwm_temp_map[nr] :
+ data->pwm_duty[nr];
+ it87_write_value(data, IT87_REG_PWM[nr],
+ data->pwm_ctrl[nr]);
+ }
} else {
if (val == 1) /* Manual mode */
data->pwm_ctrl[nr] = has_newer_autopwm(data) ?
@@ -1063,11 +1318,11 @@ static ssize_t set_pwm_enable(struct device *dev,
data->pwm_duty[nr];
else /* Automatic mode */
data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
- it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]);
+ it87_write_value(data, IT87_REG_PWM[nr], data->pwm_ctrl[nr]);
- if (data->type != it8603) {
+ if (data->type != it8603 && nr < 3) {
/* set SmartGuardian mode */
- data->fan_main_ctrl |= (1 << nr);
+ data->fan_main_ctrl |= BIT(nr);
it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
data->fan_main_ctrl);
}
@@ -1076,13 +1331,13 @@ static ssize_t set_pwm_enable(struct device *dev,
mutex_unlock(&data->update_lock);
return count;
}
+
static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
struct it87_data *data = dev_get_drvdata(dev);
+ int nr = sensor_attr->index;
long val;
if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 255)
@@ -1099,7 +1354,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
return -EBUSY;
}
data->pwm_duty[nr] = pwm_to_reg(data, val);
- it87_write_value(data, IT87_REG_PWM_DUTY(nr),
+ it87_write_value(data, IT87_REG_PWM_DUTY[nr],
data->pwm_duty[nr]);
} else {
data->pwm_duty[nr] = pwm_to_reg(data, val);
@@ -1109,17 +1364,20 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
*/
if (!(data->pwm_ctrl[nr] & 0x80)) {
data->pwm_ctrl[nr] = data->pwm_duty[nr];
- it87_write_value(data, IT87_REG_PWM(nr),
+ it87_write_value(data, IT87_REG_PWM[nr],
data->pwm_ctrl[nr]);
}
}
mutex_unlock(&data->update_lock);
return count;
}
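
Aside (not part of the patch): the set_pwm() and set_pwm_enable() handlers above are reached through the hwmon sysfs files pwm1 and pwm1_enable. A minimal user-space sketch follows; the "hwmon0" path is an assumption and on a real system has to be discovered via the device's name attribute or libsensors.

#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fprintf(f, "%s", val);
        return fclose(f);
}

int main(void)
{
        /* 1 = manual mode, then a duty cycle of roughly 50% (128 of 255) */
        if (write_attr("/sys/class/hwmon/hwmon0/pwm1_enable", "1") ||
            write_attr("/sys/class/hwmon/hwmon0/pwm1", "128"))
                perror("pwm1");
        return 0;
}
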
-static ssize_t set_pwm_freq(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+
+static ssize_t set_pwm_freq(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
struct it87_data *data = dev_get_drvdata(dev);
+ int nr = sensor_attr->index;
unsigned long val;
int i;
@@ -1131,63 +1389,66 @@ static ssize_t set_pwm_freq(struct device *dev,
/* Search for the nearest available frequency */
for (i = 0; i < 7; i++) {
- if (val > (pwm_freq[i] + pwm_freq[i+1]) / 2)
+ if (val > (pwm_freq[i] + pwm_freq[i + 1]) / 2)
break;
}
mutex_lock(&data->update_lock);
- data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL) & 0x8f;
- data->fan_ctl |= i << 4;
- it87_write_value(data, IT87_REG_FAN_CTL, data->fan_ctl);
+ if (nr == 0) {
+ data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL) & 0x8f;
+ data->fan_ctl |= i << 4;
+ it87_write_value(data, IT87_REG_FAN_CTL, data->fan_ctl);
+ } else {
+ data->extra = it87_read_value(data, IT87_REG_TEMP_EXTRA) & 0x8f;
+ data->extra |= i << 4;
+ it87_write_value(data, IT87_REG_TEMP_EXTRA, data->extra);
+ }
mutex_unlock(&data->update_lock);
return count;
}
+
static ssize_t show_pwm_temp_map(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
struct it87_data *data = it87_update_device(dev);
+ int nr = sensor_attr->index;
int map;
- if (data->pwm_temp_map[nr] < 3)
- map = 1 << data->pwm_temp_map[nr];
- else
- map = 0; /* Should never happen */
- return sprintf(buf, "%d\n", map);
+ map = data->pwm_temp_map[nr];
+ if (map >= 3)
+ map = 0; /* Should never happen */
+ if (nr >= 3) /* pwm4..6 map to temp4..6 */
+ map += 3;
+
+ return sprintf(buf, "%d\n", (int)BIT(map));
}
+
static ssize_t set_pwm_temp_map(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
struct it87_data *data = dev_get_drvdata(dev);
+ int nr = sensor_attr->index;
long val;
u8 reg;
- /*
- * This check can go away if we ever support automatic fan speed
- * control on newer chips.
- */
- if (!has_old_autopwm(data)) {
- dev_notice(dev, "Mapping change disabled for safety reasons\n");
- return -EINVAL;
- }
-
if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
+ if (nr >= 3)
+ val -= 3;
+
switch (val) {
- case (1 << 0):
+ case BIT(0):
reg = 0x00;
break;
- case (1 << 1):
+ case BIT(1):
reg = 0x01;
break;
- case (1 << 2):
+ case BIT(2):
reg = 0x02;
break;
default:
@@ -1202,14 +1463,14 @@ static ssize_t set_pwm_temp_map(struct device *dev,
*/
if (data->pwm_ctrl[nr] & 0x80) {
data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
- it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]);
+ it87_write_value(data, IT87_REG_PWM[nr], data->pwm_ctrl[nr]);
}
mutex_unlock(&data->update_lock);
return count;
}
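
As an illustration (not part of the patch), the channel-to-bitmask translation done by show_pwm_temp_map() above can be mirrored in a standalone sketch: the two register bits select temp1..3, and for pwm4..6 the result is shifted up so the bitmask names temp4..6 instead.

#include <stdio.h>

/* Mirrors the arithmetic in show_pwm_temp_map()/set_pwm_temp_map() above. */
static int temp_map_to_sysfs(int nr, int regval)
{
        int map = regval & 0x03;

        if (map >= 3)
                map = 0;        /* should never happen */
        if (nr >= 3)            /* pwm4..6 report temp4..6 */
                map += 3;
        return 1 << map;
}

int main(void)
{
        /* pwm2 (nr=1) bound to temp3 (register value 2) -> reports 4 */
        printf("pwm2: %d\n", temp_map_to_sysfs(1, 2));
        /* pwm5 (nr=4) bound to temp5 (register value 1) -> reports 16 */
        printf("pwm5: %d\n", temp_map_to_sysfs(4, 1));
        return 0;
}
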
-static ssize_t show_auto_pwm(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t show_auto_pwm(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct it87_data *data = it87_update_device(dev);
struct sensor_device_attribute_2 *sensor_attr =
@@ -1221,14 +1482,15 @@ static ssize_t show_auto_pwm(struct device *dev,
pwm_from_reg(data, data->auto_pwm[nr][point]));
}
-static ssize_t set_auto_pwm(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t set_auto_pwm(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct it87_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int point = sensor_attr->index;
+ int regaddr;
long val;
if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 255)
@@ -1236,26 +1498,65 @@ static ssize_t set_auto_pwm(struct device *dev,
mutex_lock(&data->update_lock);
data->auto_pwm[nr][point] = pwm_to_reg(data, val);
- it87_write_value(data, IT87_REG_AUTO_PWM(nr, point),
- data->auto_pwm[nr][point]);
+ if (has_newer_autopwm(data))
+ regaddr = IT87_REG_AUTO_TEMP(nr, 3);
+ else
+ regaddr = IT87_REG_AUTO_PWM(nr, point);
+ it87_write_value(data, regaddr, data->auto_pwm[nr][point]);
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_auto_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t show_auto_pwm_slope(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct it87_data *data = it87_update_device(dev);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+
+ return sprintf(buf, "%d\n", data->auto_pwm[nr][1] & 0x7f);
+}
+
+static ssize_t set_auto_pwm_slope(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct it87_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ unsigned long val;
+
+ if (kstrtoul(buf, 10, &val) < 0 || val > 127)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->auto_pwm[nr][1] = (data->auto_pwm[nr][1] & 0x80) | val;
+ it87_write_value(data, IT87_REG_AUTO_TEMP(nr, 4),
+ data->auto_pwm[nr][1]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_auto_temp(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct it87_data *data = it87_update_device(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int point = sensor_attr->index;
+ int reg;
+
+ if (has_old_autopwm(data) || point)
+ reg = data->auto_temp[nr][point];
+ else
+ reg = data->auto_temp[nr][1] - (data->auto_temp[nr][0] & 0x1f);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->auto_temp[nr][point]));
+ return sprintf(buf, "%d\n", TEMP_FROM_REG(reg));
}
-static ssize_t set_auto_temp(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t set_auto_temp(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct it87_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute_2 *sensor_attr =
@@ -1263,14 +1564,24 @@ static ssize_t set_auto_temp(struct device *dev,
int nr = sensor_attr->nr;
int point = sensor_attr->index;
long val;
+ int reg;
if (kstrtol(buf, 10, &val) < 0 || val < -128000 || val > 127000)
return -EINVAL;
mutex_lock(&data->update_lock);
- data->auto_temp[nr][point] = TEMP_TO_REG(val);
- it87_write_value(data, IT87_REG_AUTO_TEMP(nr, point),
- data->auto_temp[nr][point]);
+ if (has_newer_autopwm(data) && !point) {
+ reg = data->auto_temp[nr][1] - TEMP_TO_REG(val);
+ reg = clamp_val(reg, 0, 0x1f) | (data->auto_temp[nr][0] & 0xe0);
+ data->auto_temp[nr][0] = reg;
+ it87_write_value(data, IT87_REG_AUTO_TEMP(nr, 5), reg);
+ } else {
+ reg = TEMP_TO_REG(val);
+ data->auto_temp[nr][point] = reg;
+ if (has_newer_autopwm(data))
+ point--;
+ it87_write_value(data, IT87_REG_AUTO_TEMP(nr, point), reg);
+ }
mutex_unlock(&data->update_lock);
return count;
}
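
Worked example (not part of the patch): for the newer auto-pwm layout, point 0 is not a temperature of its own but the difference between the fan-off temperature (auto_temp[1]) and the requested hysteresis, clamped to the low 5 bits of the base+5 register, exactly as set_auto_temp() and show_auto_temp() do above. The sketch assumes the chip's 1 degree C register resolution and works in whole degrees for brevity.

#include <stdio.h>

/* Stand-in for the kernel's clamp_val() */
static int clamp(int v, int lo, int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        int temp_off = 45;      /* auto_temp[1]: fan-off temperature, degC */
        int hyst_req = 40;      /* requested point1_temp_hyst, degC */
        int old_reg = 0x00;     /* previous base+5 register content */
        int reg;

        /* difference to the fan-off temperature, kept in the low 5 bits */
        reg = clamp(temp_off - hyst_req, 0, 0x1f) | (old_reg & 0xe0);
        printf("base+5 register: 0x%02x\n", reg);       /* 0x05 */

        /* show_auto_temp() reverses this: off temp minus the low 5 bits */
        printf("reported hyst: %d degC\n", temp_off - (reg & 0x1f));
        return 0;
}
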
@@ -1308,8 +1619,9 @@ static SENSOR_DEVICE_ATTR_2(fan6_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
show_pwm_enable, set_pwm_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 0);
-static DEVICE_ATTR(pwm1_freq, S_IRUGO | S_IWUSR, show_pwm_freq, set_pwm_freq);
-static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm1_freq, S_IRUGO | S_IWUSR, show_pwm_freq,
+ set_pwm_freq, 0);
+static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IRUGO,
show_pwm_temp_map, set_pwm_temp_map, 0);
static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR,
show_auto_pwm, set_auto_pwm, 0, 0);
@@ -1329,12 +1641,16 @@ static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_temp, S_IRUGO | S_IWUSR,
show_auto_temp, set_auto_temp, 0, 3);
static SENSOR_DEVICE_ATTR_2(pwm1_auto_point4_temp, S_IRUGO | S_IWUSR,
show_auto_temp, set_auto_temp, 0, 4);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_start, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 0, 0);
+static SENSOR_DEVICE_ATTR(pwm1_auto_slope, S_IRUGO | S_IWUSR,
+ show_auto_pwm_slope, set_auto_pwm_slope, 0);
static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
show_pwm_enable, set_pwm_enable, 1);
static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 1);
-static DEVICE_ATTR(pwm2_freq, S_IRUGO, show_pwm_freq, NULL);
-static SENSOR_DEVICE_ATTR(pwm2_auto_channels_temp, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm2_freq, S_IRUGO, show_pwm_freq, set_pwm_freq, 1);
+static SENSOR_DEVICE_ATTR(pwm2_auto_channels_temp, S_IRUGO,
show_pwm_temp_map, set_pwm_temp_map, 1);
static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR,
show_auto_pwm, set_auto_pwm, 1, 0);
@@ -1354,12 +1670,16 @@ static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_temp, S_IRUGO | S_IWUSR,
show_auto_temp, set_auto_temp, 1, 3);
static SENSOR_DEVICE_ATTR_2(pwm2_auto_point4_temp, S_IRUGO | S_IWUSR,
show_auto_temp, set_auto_temp, 1, 4);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_start, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 1, 0);
+static SENSOR_DEVICE_ATTR(pwm2_auto_slope, S_IRUGO | S_IWUSR,
+ show_auto_pwm_slope, set_auto_pwm_slope, 1);
static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
show_pwm_enable, set_pwm_enable, 2);
static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 2);
-static DEVICE_ATTR(pwm3_freq, S_IRUGO, show_pwm_freq, NULL);
-static SENSOR_DEVICE_ATTR(pwm3_auto_channels_temp, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm3_freq, S_IRUGO, show_pwm_freq, NULL, 2);
+static SENSOR_DEVICE_ATTR(pwm3_auto_channels_temp, S_IRUGO,
show_pwm_temp_map, set_pwm_temp_map, 2);
static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR,
show_auto_pwm, set_auto_pwm, 2, 0);
@@ -1379,30 +1699,94 @@ static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_temp, S_IRUGO | S_IWUSR,
show_auto_temp, set_auto_temp, 2, 3);
static SENSOR_DEVICE_ATTR_2(pwm3_auto_point4_temp, S_IRUGO | S_IWUSR,
show_auto_temp, set_auto_temp, 2, 4);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_start, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 2, 0);
+static SENSOR_DEVICE_ATTR(pwm3_auto_slope, S_IRUGO | S_IWUSR,
+ show_auto_pwm_slope, set_auto_pwm_slope, 2);
+
+static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable, 3);
+static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 3);
+static SENSOR_DEVICE_ATTR(pwm4_freq, S_IRUGO, show_pwm_freq, NULL, 3);
+static SENSOR_DEVICE_ATTR(pwm4_auto_channels_temp, S_IRUGO,
+ show_pwm_temp_map, set_pwm_temp_map, 3);
+static SENSOR_DEVICE_ATTR_2(pwm4_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm4_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm4_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 2);
+static SENSOR_DEVICE_ATTR_2(pwm4_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 3);
+static SENSOR_DEVICE_ATTR_2(pwm4_auto_start, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 3, 0);
+static SENSOR_DEVICE_ATTR(pwm4_auto_slope, S_IRUGO | S_IWUSR,
+ show_auto_pwm_slope, set_auto_pwm_slope, 3);
+
+static SENSOR_DEVICE_ATTR(pwm5_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable, 4);
+static SENSOR_DEVICE_ATTR(pwm5, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 4);
+static SENSOR_DEVICE_ATTR(pwm5_freq, S_IRUGO, show_pwm_freq, NULL, 4);
+static SENSOR_DEVICE_ATTR(pwm5_auto_channels_temp, S_IRUGO,
+ show_pwm_temp_map, set_pwm_temp_map, 4);
+static SENSOR_DEVICE_ATTR_2(pwm5_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm5_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm5_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 2);
+static SENSOR_DEVICE_ATTR_2(pwm5_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 3);
+static SENSOR_DEVICE_ATTR_2(pwm5_auto_start, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 4, 0);
+static SENSOR_DEVICE_ATTR(pwm5_auto_slope, S_IRUGO | S_IWUSR,
+ show_auto_pwm_slope, set_auto_pwm_slope, 4);
+
+static SENSOR_DEVICE_ATTR(pwm6_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable, 5);
+static SENSOR_DEVICE_ATTR(pwm6, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 5);
+static SENSOR_DEVICE_ATTR(pwm6_freq, S_IRUGO, show_pwm_freq, NULL, 5);
+static SENSOR_DEVICE_ATTR(pwm6_auto_channels_temp, S_IRUGO,
+ show_pwm_temp_map, set_pwm_temp_map, 5);
+static SENSOR_DEVICE_ATTR_2(pwm6_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm6_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm6_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 2);
+static SENSOR_DEVICE_ATTR_2(pwm6_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 2, 3);
+static SENSOR_DEVICE_ATTR_2(pwm6_auto_start, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 5, 0);
+static SENSOR_DEVICE_ATTR(pwm6_auto_slope, S_IRUGO | S_IWUSR,
+ show_auto_pwm_slope, set_auto_pwm_slope, 5);
/* Alarms */
static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
struct it87_data *data = it87_update_device(dev);
+
return sprintf(buf, "%u\n", data->alarms);
}
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- int bitnr = to_sensor_dev_attr(attr)->index;
struct it87_data *data = it87_update_device(dev);
+ int bitnr = to_sensor_dev_attr(attr)->index;
+
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static ssize_t clear_intrusion(struct device *dev, struct device_attribute
- *attr, const char *buf, size_t count)
+static ssize_t clear_intrusion(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct it87_data *data = dev_get_drvdata(dev);
- long val;
int config;
+ long val;
if (kstrtol(buf, 10, &val) < 0 || val != 0)
return -EINVAL;
@@ -1412,7 +1796,7 @@ static ssize_t clear_intrusion(struct device *dev, struct device_attribute
if (config < 0) {
count = config;
} else {
- config |= 1 << 5;
+ config |= BIT(5);
it87_write_value(data, IT87_REG_CONFIG, config);
/* Invalidate cache to force re-read */
data->valid = 0;
@@ -1443,29 +1827,30 @@ static SENSOR_DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR,
show_alarm, clear_intrusion, 4);
static ssize_t show_beep(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- int bitnr = to_sensor_dev_attr(attr)->index;
struct it87_data *data = it87_update_device(dev);
+ int bitnr = to_sensor_dev_attr(attr)->index;
+
return sprintf(buf, "%u\n", (data->beeps >> bitnr) & 1);
}
+
static ssize_t set_beep(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (kstrtol(buf, 10, &val) < 0
- || (val != 0 && val != 1))
+ if (kstrtol(buf, 10, &val) < 0 || (val != 0 && val != 1))
return -EINVAL;
mutex_lock(&data->update_lock);
data->beeps = it87_read_value(data, IT87_REG_BEEP_ENABLE);
if (val)
- data->beeps |= (1 << bitnr);
+ data->beeps |= BIT(bitnr);
else
- data->beeps &= ~(1 << bitnr);
+ data->beeps &= ~BIT(bitnr);
it87_write_value(data, IT87_REG_BEEP_ENABLE, data->beeps);
mutex_unlock(&data->update_lock);
return count;
@@ -1493,13 +1878,15 @@ static SENSOR_DEVICE_ATTR(temp2_beep, S_IRUGO, show_beep, NULL, 2);
static SENSOR_DEVICE_ATTR(temp3_beep, S_IRUGO, show_beep, NULL, 2);
static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
struct it87_data *data = dev_get_drvdata(dev);
+
return sprintf(buf, "%u\n", data->vrm);
}
+
static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
struct it87_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -1514,15 +1901,16 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm));
+
+ return sprintf(buf, "%ld\n", (long)vid_from_reg(data->vid, data->vrm));
}
static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
static ssize_t show_label(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
static const char * const labels[] = {
"+5V",
@@ -1548,227 +1936,348 @@ static ssize_t show_label(struct device *dev, struct device_attribute *attr,
static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 2);
-/* special AVCC3 IT8603E in9 */
+/* AVCC3 */
static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 0);
-static ssize_t show_name(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static umode_t it87_in_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
{
+ struct device *dev = container_of(kobj, struct device, kobj);
struct it87_data *data = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", data->name);
+ int i = index / 5; /* voltage index */
+ int a = index % 5; /* attribute index */
+
+ if (index >= 40) { /* in8 and higher only have input attributes */
+ i = index - 40 + 8;
+ a = 0;
+ }
+
+ if (!(data->has_in & BIT(i)))
+ return 0;
+
+ if (a == 4 && !data->has_beep)
+ return 0;
+
+ return attr->mode;
}
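
To make the index scheme of it87_in_is_visible() above concrete (sketch, not part of the patch): each of in0..in7 contributes five attributes (input, min, max, alarm, beep), while in8..in12 only expose their input files starting at index 40.

#include <stdio.h>

static const char *const attr_name[5] = {
        "input", "min", "max", "alarm", "beep"
};

/* Mirrors the index decode in it87_in_is_visible() above. */
static void decode(int index)
{
        int i = index / 5;      /* voltage channel */
        int a = index % 5;      /* attribute within the channel */

        if (index >= 40) {      /* in8..in12 only expose inX_input */
                i = index - 40 + 8;
                a = 0;
        }
        printf("index %2d -> in%d_%s\n", index, i, attr_name[a]);
}

int main(void)
{
        decode(23);     /* in4_alarm */
        decode(39);     /* in7_beep */
        decode(42);     /* in10_input */
        return 0;
}
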
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static struct attribute *it87_attributes_in[10][5] = {
-{
+static struct attribute *it87_attributes_in[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_min.dev_attr.attr,
&sensor_dev_attr_in0_max.dev_attr.attr,
&sensor_dev_attr_in0_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_in0_beep.dev_attr.attr, /* 4 */
+
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_min.dev_attr.attr,
&sensor_dev_attr_in1_max.dev_attr.attr,
&sensor_dev_attr_in1_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_in1_beep.dev_attr.attr, /* 9 */
+
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in2_min.dev_attr.attr,
&sensor_dev_attr_in2_max.dev_attr.attr,
&sensor_dev_attr_in2_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_in2_beep.dev_attr.attr, /* 14 */
+
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in3_min.dev_attr.attr,
&sensor_dev_attr_in3_max.dev_attr.attr,
&sensor_dev_attr_in3_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_in3_beep.dev_attr.attr, /* 19 */
+
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in4_min.dev_attr.attr,
&sensor_dev_attr_in4_max.dev_attr.attr,
&sensor_dev_attr_in4_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_in4_beep.dev_attr.attr, /* 24 */
+
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in5_min.dev_attr.attr,
&sensor_dev_attr_in5_max.dev_attr.attr,
&sensor_dev_attr_in5_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_in5_beep.dev_attr.attr, /* 29 */
+
&sensor_dev_attr_in6_input.dev_attr.attr,
&sensor_dev_attr_in6_min.dev_attr.attr,
&sensor_dev_attr_in6_max.dev_attr.attr,
&sensor_dev_attr_in6_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_in6_beep.dev_attr.attr, /* 34 */
+
&sensor_dev_attr_in7_input.dev_attr.attr,
&sensor_dev_attr_in7_min.dev_attr.attr,
&sensor_dev_attr_in7_max.dev_attr.attr,
&sensor_dev_attr_in7_alarm.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_in8_input.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_in9_input.dev_attr.attr,
- NULL
-} };
-
-static const struct attribute_group it87_group_in[10] = {
- { .attrs = it87_attributes_in[0] },
- { .attrs = it87_attributes_in[1] },
- { .attrs = it87_attributes_in[2] },
- { .attrs = it87_attributes_in[3] },
- { .attrs = it87_attributes_in[4] },
- { .attrs = it87_attributes_in[5] },
- { .attrs = it87_attributes_in[6] },
- { .attrs = it87_attributes_in[7] },
- { .attrs = it87_attributes_in[8] },
- { .attrs = it87_attributes_in[9] },
+ &sensor_dev_attr_in7_beep.dev_attr.attr, /* 39 */
+
+ &sensor_dev_attr_in8_input.dev_attr.attr, /* 40 */
+ &sensor_dev_attr_in9_input.dev_attr.attr, /* 41 */
+ &sensor_dev_attr_in10_input.dev_attr.attr, /* 42 */
+ &sensor_dev_attr_in11_input.dev_attr.attr, /* 43 */
+ &sensor_dev_attr_in12_input.dev_attr.attr, /* 44 */
+ NULL
 };
-static struct attribute *it87_attributes_temp[3][6] = {
+static const struct attribute_group it87_group_in = {
+ .attrs = it87_attributes_in,
+ .is_visible = it87_in_is_visible,
+};
+
+static umode_t it87_temp_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct it87_data *data = dev_get_drvdata(dev);
+ int i = index / 7; /* temperature index */
+ int a = index % 7; /* attribute index */
+
+ if (index >= 21) {
+ i = index - 21 + 3;
+ a = 0;
+ }
+
+ if (!(data->has_temp & BIT(i)))
+ return 0;
+
+ if (a == 5 && !has_temp_offset(data))
+ return 0;
+
+ if (a == 6 && !data->has_beep)
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute *it87_attributes_temp[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp1_type.dev_attr.attr,
&sensor_dev_attr_temp1_alarm.dev_attr.attr,
- NULL
-} , {
- &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_offset.dev_attr.attr, /* 5 */
+ &sensor_dev_attr_temp1_beep.dev_attr.attr, /* 6 */
+
+ &sensor_dev_attr_temp2_input.dev_attr.attr, /* 7 */
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp2_type.dev_attr.attr,
&sensor_dev_attr_temp2_alarm.dev_attr.attr,
- NULL
-} , {
- &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_offset.dev_attr.attr,
+ &sensor_dev_attr_temp2_beep.dev_attr.attr,
+
+ &sensor_dev_attr_temp3_input.dev_attr.attr, /* 14 */
&sensor_dev_attr_temp3_max.dev_attr.attr,
&sensor_dev_attr_temp3_min.dev_attr.attr,
&sensor_dev_attr_temp3_type.dev_attr.attr,
&sensor_dev_attr_temp3_alarm.dev_attr.attr,
- NULL
-} };
+ &sensor_dev_attr_temp3_offset.dev_attr.attr,
+ &sensor_dev_attr_temp3_beep.dev_attr.attr,
-static const struct attribute_group it87_group_temp[3] = {
- { .attrs = it87_attributes_temp[0] },
- { .attrs = it87_attributes_temp[1] },
- { .attrs = it87_attributes_temp[2] },
+ &sensor_dev_attr_temp4_input.dev_attr.attr, /* 21 */
+ &sensor_dev_attr_temp5_input.dev_attr.attr,
+ &sensor_dev_attr_temp6_input.dev_attr.attr,
+ NULL
};
-static struct attribute *it87_attributes_temp_offset[] = {
- &sensor_dev_attr_temp1_offset.dev_attr.attr,
- &sensor_dev_attr_temp2_offset.dev_attr.attr,
- &sensor_dev_attr_temp3_offset.dev_attr.attr,
+static const struct attribute_group it87_group_temp = {
+ .attrs = it87_attributes_temp,
+ .is_visible = it87_temp_is_visible,
};
+static umode_t it87_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct it87_data *data = dev_get_drvdata(dev);
+
+ if ((index == 2 || index == 3) && !data->has_vid)
+ return 0;
+
+ if (index > 3 && !(data->in_internal & BIT(index - 4)))
+ return 0;
+
+ return attr->mode;
+}
+
static struct attribute *it87_attributes[] = {
&dev_attr_alarms.attr,
&sensor_dev_attr_intrusion0_alarm.dev_attr.attr,
- &dev_attr_name.attr,
+ &dev_attr_vrm.attr, /* 2 */
+ &dev_attr_cpu0_vid.attr, /* 3 */
+ &sensor_dev_attr_in3_label.dev_attr.attr, /* 4 .. 7 */
+ &sensor_dev_attr_in7_label.dev_attr.attr,
+ &sensor_dev_attr_in8_label.dev_attr.attr,
+ &sensor_dev_attr_in9_label.dev_attr.attr,
NULL
};
static const struct attribute_group it87_group = {
.attrs = it87_attributes,
+ .is_visible = it87_is_visible,
};
-static struct attribute *it87_attributes_in_beep[] = {
- &sensor_dev_attr_in0_beep.dev_attr.attr,
- &sensor_dev_attr_in1_beep.dev_attr.attr,
- &sensor_dev_attr_in2_beep.dev_attr.attr,
- &sensor_dev_attr_in3_beep.dev_attr.attr,
- &sensor_dev_attr_in4_beep.dev_attr.attr,
- &sensor_dev_attr_in5_beep.dev_attr.attr,
- &sensor_dev_attr_in6_beep.dev_attr.attr,
- &sensor_dev_attr_in7_beep.dev_attr.attr,
- NULL,
- NULL,
-};
+static umode_t it87_fan_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct it87_data *data = dev_get_drvdata(dev);
+ int i = index / 5; /* fan index */
+ int a = index % 5; /* attribute index */
-static struct attribute *it87_attributes_temp_beep[] = {
- &sensor_dev_attr_temp1_beep.dev_attr.attr,
- &sensor_dev_attr_temp2_beep.dev_attr.attr,
- &sensor_dev_attr_temp3_beep.dev_attr.attr,
-};
+ if (index >= 15) { /* fan 4..6 don't have divisor attributes */
+ i = (index - 15) / 4 + 3;
+ a = (index - 15) % 4;
+ }
-static struct attribute *it87_attributes_fan[6][3+1] = { {
+ if (!(data->has_fan & BIT(i)))
+ return 0;
+
+ if (a == 3) { /* beep */
+ if (!data->has_beep)
+ return 0;
+ /* first fan beep attribute is writable */
+ if (i == __ffs(data->has_fan))
+ return attr->mode | S_IWUSR;
+ }
+
+ if (a == 4 && has_16bit_fans(data)) /* divisor */
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute *it87_attributes_fan[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan1_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_fan1_beep.dev_attr.attr, /* 3 */
+ &sensor_dev_attr_fan1_div.dev_attr.attr, /* 4 */
+
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan2_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_fan2_beep.dev_attr.attr,
+ &sensor_dev_attr_fan2_div.dev_attr.attr, /* 9 */
+
&sensor_dev_attr_fan3_input.dev_attr.attr,
&sensor_dev_attr_fan3_min.dev_attr.attr,
&sensor_dev_attr_fan3_alarm.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_fan4_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_beep.dev_attr.attr,
+ &sensor_dev_attr_fan3_div.dev_attr.attr, /* 14 */
+
+ &sensor_dev_attr_fan4_input.dev_attr.attr, /* 15 */
&sensor_dev_attr_fan4_min.dev_attr.attr,
&sensor_dev_attr_fan4_alarm.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_fan5_input.dev_attr.attr,
+ &sensor_dev_attr_fan4_beep.dev_attr.attr,
+
+ &sensor_dev_attr_fan5_input.dev_attr.attr, /* 19 */
&sensor_dev_attr_fan5_min.dev_attr.attr,
&sensor_dev_attr_fan5_alarm.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_fan6_input.dev_attr.attr,
+ &sensor_dev_attr_fan5_beep.dev_attr.attr,
+
+ &sensor_dev_attr_fan6_input.dev_attr.attr, /* 23 */
&sensor_dev_attr_fan6_min.dev_attr.attr,
&sensor_dev_attr_fan6_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan6_beep.dev_attr.attr,
NULL
-} };
-
-static const struct attribute_group it87_group_fan[6] = {
- { .attrs = it87_attributes_fan[0] },
- { .attrs = it87_attributes_fan[1] },
- { .attrs = it87_attributes_fan[2] },
- { .attrs = it87_attributes_fan[3] },
- { .attrs = it87_attributes_fan[4] },
- { .attrs = it87_attributes_fan[5] },
};
-static const struct attribute *it87_attributes_fan_div[] = {
- &sensor_dev_attr_fan1_div.dev_attr.attr,
- &sensor_dev_attr_fan2_div.dev_attr.attr,
- &sensor_dev_attr_fan3_div.dev_attr.attr,
+static const struct attribute_group it87_group_fan = {
+ .attrs = it87_attributes_fan,
+ .is_visible = it87_fan_is_visible,
};
-static struct attribute *it87_attributes_pwm[3][4+1] = { {
+static umode_t it87_pwm_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct it87_data *data = dev_get_drvdata(dev);
+ int i = index / 4; /* pwm index */
+ int a = index % 4; /* attribute index */
+
+ if (!(data->has_pwm & BIT(i)))
+ return 0;
+
+ /* pwmX_auto_channels_temp is only writable if auto pwm is supported */
+ if (a == 3 && (has_old_autopwm(data) || has_newer_autopwm(data)))
+ return attr->mode | S_IWUSR;
+
+ /* pwm2_freq is writable if there are two pwm frequency selects */
+ if (has_pwm_freq2(data) && i == 1 && a == 2)
+ return attr->mode | S_IWUSR;
+
+ return attr->mode;
+}
+
+static struct attribute *it87_attributes_pwm[] = {
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
- &dev_attr_pwm1_freq.attr,
+ &sensor_dev_attr_pwm1_freq.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
- NULL
-}, {
+
&sensor_dev_attr_pwm2_enable.dev_attr.attr,
&sensor_dev_attr_pwm2.dev_attr.attr,
- &dev_attr_pwm2_freq.attr,
+ &sensor_dev_attr_pwm2_freq.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_channels_temp.dev_attr.attr,
- NULL
-}, {
+
&sensor_dev_attr_pwm3_enable.dev_attr.attr,
&sensor_dev_attr_pwm3.dev_attr.attr,
- &dev_attr_pwm3_freq.attr,
+ &sensor_dev_attr_pwm3_freq.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_channels_temp.dev_attr.attr,
+
+ &sensor_dev_attr_pwm4_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm4.dev_attr.attr,
+ &sensor_dev_attr_pwm4_freq.dev_attr.attr,
+ &sensor_dev_attr_pwm4_auto_channels_temp.dev_attr.attr,
+
+ &sensor_dev_attr_pwm5_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm5.dev_attr.attr,
+ &sensor_dev_attr_pwm5_freq.dev_attr.attr,
+ &sensor_dev_attr_pwm5_auto_channels_temp.dev_attr.attr,
+
+ &sensor_dev_attr_pwm6_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm6.dev_attr.attr,
+ &sensor_dev_attr_pwm6_freq.dev_attr.attr,
+ &sensor_dev_attr_pwm6_auto_channels_temp.dev_attr.attr,
+
NULL
-} };
+};
-static const struct attribute_group it87_group_pwm[3] = {
- { .attrs = it87_attributes_pwm[0] },
- { .attrs = it87_attributes_pwm[1] },
- { .attrs = it87_attributes_pwm[2] },
+static const struct attribute_group it87_group_pwm = {
+ .attrs = it87_attributes_pwm,
+ .is_visible = it87_pwm_is_visible,
};
-static struct attribute *it87_attributes_autopwm[3][9+1] = { {
+static umode_t it87_auto_pwm_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct it87_data *data = dev_get_drvdata(dev);
+ int i = index / 11; /* pwm index */
+ int a = index % 11; /* attribute index */
+
+ if (index >= 33) { /* pwm 4..6 */
+ i = (index - 33) / 6 + 3;
+ a = (index - 33) % 6 + 4;
+ }
+
+ if (!(data->has_pwm & BIT(i)))
+ return 0;
+
+ if (has_newer_autopwm(data)) {
+ if (a < 4) /* no auto point pwm */
+ return 0;
+ if (a == 8) /* no auto_point4 */
+ return 0;
+ }
+ if (has_old_autopwm(data)) {
+ if (a >= 9) /* no pwm_auto_start, pwm_auto_slope */
+ return 0;
+ }
+
+ return attr->mode;
+}
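
The decode in it87_auto_pwm_is_visible() above is the least obvious one, so here is a standalone sketch (not part of the patch): pwm1..3 contribute eleven attributes each (four auto point pwm values, five auto point temperatures including the hysteresis, start, slope), while pwm4..6, starting at index 33, contribute only their four temperature attributes plus start and slope.

#include <stdio.h>

/* Mirrors the index decode in it87_auto_pwm_is_visible() above. */
static void decode(int index, int *i, int *a)
{
        *i = index / 11;        /* pwm channel */
        *a = index % 11;        /* attribute within the channel */

        if (index >= 33) {      /* pwm4..6 start at index 33 */
                *i = (index - 33) / 6 + 3;
                *a = (index - 33) % 6 + 4;
        }
}

int main(void)
{
        static const int samples[] = { 0, 10, 22, 33, 44 };
        unsigned int n;

        for (n = 0; n < sizeof(samples) / sizeof(samples[0]); n++) {
                int i, a;

                decode(samples[n], &i, &a);
                printf("index %2d -> pwm%d, attribute slot %d\n",
                       samples[n], i + 1, a);
        }
        return 0;
}
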
+
+static struct attribute *it87_attributes_auto_pwm[] = {
&sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
@@ -1778,9 +2287,10 @@ static struct attribute *it87_attributes_autopwm[3][9+1] = { {
&sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_start.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_slope.dev_attr.attr,
+
+ &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr, /* 11 */
&sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point3_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point4_pwm.dev_attr.attr,
@@ -1789,9 +2299,10 @@ static struct attribute *it87_attributes_autopwm[3][9+1] = { {
&sensor_dev_attr_pwm2_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point3_temp.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point4_temp.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_start.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_slope.dev_attr.attr,
+
+ &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr, /* 22 */
&sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point3_pwm.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point4_pwm.dev_attr.attr,
@@ -1800,61 +2311,53 @@ static struct attribute *it87_attributes_autopwm[3][9+1] = { {
&sensor_dev_attr_pwm3_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point3_temp.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point4_temp.dev_attr.attr,
- NULL
-} };
-
-static const struct attribute_group it87_group_autopwm[3] = {
- { .attrs = it87_attributes_autopwm[0] },
- { .attrs = it87_attributes_autopwm[1] },
- { .attrs = it87_attributes_autopwm[2] },
-};
+ &sensor_dev_attr_pwm3_auto_start.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_slope.dev_attr.attr,
+
+ &sensor_dev_attr_pwm4_auto_point1_temp.dev_attr.attr, /* 33 */
+ &sensor_dev_attr_pwm4_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm4_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm4_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm4_auto_start.dev_attr.attr,
+ &sensor_dev_attr_pwm4_auto_slope.dev_attr.attr,
+
+ &sensor_dev_attr_pwm5_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm5_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm5_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm5_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm5_auto_start.dev_attr.attr,
+ &sensor_dev_attr_pwm5_auto_slope.dev_attr.attr,
+
+ &sensor_dev_attr_pwm6_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm6_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm6_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm6_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm6_auto_start.dev_attr.attr,
+ &sensor_dev_attr_pwm6_auto_slope.dev_attr.attr,
-static struct attribute *it87_attributes_fan_beep[] = {
- &sensor_dev_attr_fan1_beep.dev_attr.attr,
- &sensor_dev_attr_fan2_beep.dev_attr.attr,
- &sensor_dev_attr_fan3_beep.dev_attr.attr,
- &sensor_dev_attr_fan4_beep.dev_attr.attr,
- &sensor_dev_attr_fan5_beep.dev_attr.attr,
- &sensor_dev_attr_fan6_beep.dev_attr.attr,
-};
-
-static struct attribute *it87_attributes_vid[] = {
- &dev_attr_vrm.attr,
- &dev_attr_cpu0_vid.attr,
- NULL
-};
-
-static const struct attribute_group it87_group_vid = {
- .attrs = it87_attributes_vid,
-};
-
-static struct attribute *it87_attributes_label[] = {
- &sensor_dev_attr_in3_label.dev_attr.attr,
- &sensor_dev_attr_in7_label.dev_attr.attr,
- &sensor_dev_attr_in8_label.dev_attr.attr,
- &sensor_dev_attr_in9_label.dev_attr.attr,
- NULL
+ NULL,
};
-static const struct attribute_group it87_group_label = {
- .attrs = it87_attributes_label,
+static const struct attribute_group it87_group_auto_pwm = {
+ .attrs = it87_attributes_auto_pwm,
+ .is_visible = it87_auto_pwm_is_visible,
};
/* SuperIO detection - will change isa_address if a chip is found */
-static int __init it87_find(unsigned short *address,
- struct it87_sio_data *sio_data)
+static int __init it87_find(int sioaddr, unsigned short *address,
+ struct it87_sio_data *sio_data)
{
int err;
u16 chip_type;
const char *board_vendor, *board_name;
const struct it87_devices *config;
- err = superio_enter();
+ err = superio_enter(sioaddr);
if (err)
return err;
err = -ENODEV;
- chip_type = force_id ? force_id : superio_inw(DEVID);
+ chip_type = force_id ? force_id : superio_inw(sioaddr, DEVID);
switch (chip_type) {
case IT8705F_DEVID:
@@ -1910,6 +2413,9 @@ static int __init it87_find(unsigned short *address,
case IT8620E_DEVID:
sio_data->type = it8620;
break;
+ case IT8628E_DEVID:
+ sio_data->type = it8628;
+ break;
case 0xffff: /* No device at all */
goto exit;
default:
@@ -1917,20 +2423,20 @@ static int __init it87_find(unsigned short *address,
goto exit;
}
- superio_select(PME);
- if (!(superio_inb(IT87_ACT_REG) & 0x01)) {
+ superio_select(sioaddr, PME);
+ if (!(superio_inb(sioaddr, IT87_ACT_REG) & 0x01)) {
pr_info("Device not activated, skipping\n");
goto exit;
}
- *address = superio_inw(IT87_BASE_REG) & ~(IT87_EXTENT - 1);
+ *address = superio_inw(sioaddr, IT87_BASE_REG) & ~(IT87_EXTENT - 1);
if (*address == 0) {
pr_info("Base address not set, skipping\n");
goto exit;
}
err = 0;
- sio_data->revision = superio_inb(DEVREV) & 0x0f;
+ sio_data->revision = superio_inb(sioaddr, DEVREV) & 0x0f;
pr_info("Found IT%04x%s chip at 0x%x, revision %d\n", chip_type,
it87_devices[sio_data->type].suffix,
*address, sio_data->revision);
@@ -1939,14 +2445,19 @@ static int __init it87_find(unsigned short *address,
/* in7 (VSB or VCCH5V) is always internal on some chips */
if (has_in7_internal(config))
- sio_data->internal |= (1 << 1);
+ sio_data->internal |= BIT(1);
/* in8 (Vbat) is always internal */
- sio_data->internal |= (1 << 2);
+ sio_data->internal |= BIT(2);
- /* Only the IT8603E has in9 */
- if (sio_data->type != it8603)
- sio_data->skip_in |= (1 << 9);
+ /* in9 (AVCC3), always internal if supported */
+ if (has_avcc3(config))
+ sio_data->internal |= BIT(3); /* in9 is AVCC */
+ else
+ sio_data->skip_in |= BIT(9);
+
+ if (!has_six_pwm(config))
+ sio_data->skip_pwm |= BIT(3) | BIT(4) | BIT(5);
if (!has_vid(config))
sio_data->skip_vid = 1;
@@ -1954,45 +2465,46 @@ static int __init it87_find(unsigned short *address,
/* Read GPIO config and VID value from LDN 7 (GPIO) */
if (sio_data->type == it87) {
/* The IT8705F has a different LD number for GPIO */
- superio_select(5);
- sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+ superio_select(sioaddr, 5);
+ sio_data->beep_pin = superio_inb(sioaddr,
+ IT87_SIO_BEEP_PIN_REG) & 0x3f;
} else if (sio_data->type == it8783) {
int reg25, reg27, reg2a, reg2c, regef;
- superio_select(GPIO);
+ superio_select(sioaddr, GPIO);
- reg25 = superio_inb(IT87_SIO_GPIO1_REG);
- reg27 = superio_inb(IT87_SIO_GPIO3_REG);
- reg2a = superio_inb(IT87_SIO_PINX1_REG);
- reg2c = superio_inb(IT87_SIO_PINX2_REG);
- regef = superio_inb(IT87_SIO_SPI_REG);
+ reg25 = superio_inb(sioaddr, IT87_SIO_GPIO1_REG);
+ reg27 = superio_inb(sioaddr, IT87_SIO_GPIO3_REG);
+ reg2a = superio_inb(sioaddr, IT87_SIO_PINX1_REG);
+ reg2c = superio_inb(sioaddr, IT87_SIO_PINX2_REG);
+ regef = superio_inb(sioaddr, IT87_SIO_SPI_REG);
/* Check if fan3 is there or not */
- if ((reg27 & (1 << 0)) || !(reg2c & (1 << 2)))
- sio_data->skip_fan |= (1 << 2);
- if ((reg25 & (1 << 4))
- || (!(reg2a & (1 << 1)) && (regef & (1 << 0))))
- sio_data->skip_pwm |= (1 << 2);
+ if ((reg27 & BIT(0)) || !(reg2c & BIT(2)))
+ sio_data->skip_fan |= BIT(2);
+ if ((reg25 & BIT(4)) ||
+ (!(reg2a & BIT(1)) && (regef & BIT(0))))
+ sio_data->skip_pwm |= BIT(2);
/* Check if fan2 is there or not */
- if (reg27 & (1 << 7))
- sio_data->skip_fan |= (1 << 1);
- if (reg27 & (1 << 3))
- sio_data->skip_pwm |= (1 << 1);
+ if (reg27 & BIT(7))
+ sio_data->skip_fan |= BIT(1);
+ if (reg27 & BIT(3))
+ sio_data->skip_pwm |= BIT(1);
/* VIN5 */
- if ((reg27 & (1 << 0)) || (reg2c & (1 << 2)))
- sio_data->skip_in |= (1 << 5); /* No VIN5 */
+ if ((reg27 & BIT(0)) || (reg2c & BIT(2)))
+ sio_data->skip_in |= BIT(5); /* No VIN5 */
/* VIN6 */
- if (reg27 & (1 << 1))
- sio_data->skip_in |= (1 << 6); /* No VIN6 */
+ if (reg27 & BIT(1))
+ sio_data->skip_in |= BIT(6); /* No VIN6 */
/*
* VIN7
* Does not depend on bit 2 of Reg2C, contrary to datasheet.
*/
- if (reg27 & (1 << 2)) {
+ if (reg27 & BIT(2)) {
/*
* The data sheet is a bit unclear regarding the
* internal voltage divider for VCCH5V. It says
@@ -2006,81 +2518,121 @@ static int __init it87_find(unsigned short *address,
* not the case, and ask the user to report if the
* resulting voltage is sane.
*/
- if (!(reg2c & (1 << 1))) {
- reg2c |= (1 << 1);
- superio_outb(IT87_SIO_PINX2_REG, reg2c);
+ if (!(reg2c & BIT(1))) {
+ reg2c |= BIT(1);
+ superio_outb(sioaddr, IT87_SIO_PINX2_REG,
+ reg2c);
pr_notice("Routing internal VCCH5V to in7.\n");
}
pr_notice("in7 routed to internal voltage divider, with external pin disabled.\n");
pr_notice("Please report if it displays a reasonable voltage.\n");
}
- if (reg2c & (1 << 0))
- sio_data->internal |= (1 << 0);
- if (reg2c & (1 << 1))
- sio_data->internal |= (1 << 1);
+ if (reg2c & BIT(0))
+ sio_data->internal |= BIT(0);
+ if (reg2c & BIT(1))
+ sio_data->internal |= BIT(1);
- sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+ sio_data->beep_pin = superio_inb(sioaddr,
+ IT87_SIO_BEEP_PIN_REG) & 0x3f;
} else if (sio_data->type == it8603) {
int reg27, reg29;
- superio_select(GPIO);
+ superio_select(sioaddr, GPIO);
- reg27 = superio_inb(IT87_SIO_GPIO3_REG);
+ reg27 = superio_inb(sioaddr, IT87_SIO_GPIO3_REG);
/* Check if fan3 is there or not */
- if (reg27 & (1 << 6))
- sio_data->skip_pwm |= (1 << 2);
- if (reg27 & (1 << 7))
- sio_data->skip_fan |= (1 << 2);
+ if (reg27 & BIT(6))
+ sio_data->skip_pwm |= BIT(2);
+ if (reg27 & BIT(7))
+ sio_data->skip_fan |= BIT(2);
/* Check if fan2 is there or not */
- reg29 = superio_inb(IT87_SIO_GPIO5_REG);
- if (reg29 & (1 << 1))
- sio_data->skip_pwm |= (1 << 1);
- if (reg29 & (1 << 2))
- sio_data->skip_fan |= (1 << 1);
-
- sio_data->skip_in |= (1 << 5); /* No VIN5 */
- sio_data->skip_in |= (1 << 6); /* No VIN6 */
-
- sio_data->internal |= (1 << 3); /* in9 is AVCC */
-
- sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
- } else if (sio_data->type == it8620) {
+ reg29 = superio_inb(sioaddr, IT87_SIO_GPIO5_REG);
+ if (reg29 & BIT(1))
+ sio_data->skip_pwm |= BIT(1);
+ if (reg29 & BIT(2))
+ sio_data->skip_fan |= BIT(1);
+
+ sio_data->skip_in |= BIT(5); /* No VIN5 */
+ sio_data->skip_in |= BIT(6); /* No VIN6 */
+
+ sio_data->beep_pin = superio_inb(sioaddr,
+ IT87_SIO_BEEP_PIN_REG) & 0x3f;
+ } else if (sio_data->type == it8620 || sio_data->type == it8628) {
int reg;
- superio_select(GPIO);
+ superio_select(sioaddr, GPIO);
+
+ /* Check for pwm5 */
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO1_REG);
+ if (reg & BIT(6))
+ sio_data->skip_pwm |= BIT(4);
/* Check for fan4, fan5 */
- reg = superio_inb(IT87_SIO_GPIO2_REG);
- if (!(reg & (1 << 5)))
- sio_data->skip_fan |= (1 << 3);
- if (!(reg & (1 << 4)))
- sio_data->skip_fan |= (1 << 4);
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO2_REG);
+ if (!(reg & BIT(5)))
+ sio_data->skip_fan |= BIT(3);
+ if (!(reg & BIT(4)))
+ sio_data->skip_fan |= BIT(4);
/* Check for pwm3, fan3 */
- reg = superio_inb(IT87_SIO_GPIO3_REG);
- if (reg & (1 << 6))
- sio_data->skip_pwm |= (1 << 2);
- if (reg & (1 << 7))
- sio_data->skip_fan |= (1 << 2);
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO3_REG);
+ if (reg & BIT(6))
+ sio_data->skip_pwm |= BIT(2);
+ if (reg & BIT(7))
+ sio_data->skip_fan |= BIT(2);
+
+ /* Check for pwm4 */
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO4_REG);
+ if (!(reg & BIT(2)))
+ sio_data->skip_pwm |= BIT(3);
/* Check for pwm2, fan2 */
- reg = superio_inb(IT87_SIO_GPIO5_REG);
- if (reg & (1 << 1))
- sio_data->skip_pwm |= (1 << 1);
- if (reg & (1 << 2))
- sio_data->skip_fan |= (1 << 1);
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO5_REG);
+ if (reg & BIT(1))
+ sio_data->skip_pwm |= BIT(1);
+ if (reg & BIT(2))
+ sio_data->skip_fan |= BIT(1);
+ /* Check for pwm6, fan6 */
+ if (!(reg & BIT(7))) {
+ sio_data->skip_pwm |= BIT(5);
+ sio_data->skip_fan |= BIT(5);
+ }
- sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+ sio_data->beep_pin = superio_inb(sioaddr,
+ IT87_SIO_BEEP_PIN_REG) & 0x3f;
} else {
int reg;
bool uart6;
- superio_select(GPIO);
+ superio_select(sioaddr, GPIO);
+
+ /* Check for fan4, fan5 */
+ if (has_five_fans(config)) {
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO2_REG);
+ switch (sio_data->type) {
+ case it8718:
+ if (reg & BIT(5))
+ sio_data->skip_fan |= BIT(3);
+ if (reg & BIT(4))
+ sio_data->skip_fan |= BIT(4);
+ break;
+ case it8720:
+ case it8721:
+ case it8728:
+ if (!(reg & BIT(5)))
+ sio_data->skip_fan |= BIT(3);
+ if (!(reg & BIT(4)))
+ sio_data->skip_fan |= BIT(4);
+ break;
+ default:
+ break;
+ }
+ }
- reg = superio_inb(IT87_SIO_GPIO3_REG);
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO3_REG);
if (!sio_data->skip_vid) {
/* We need at least 4 VID pins */
if (reg & 0x0f) {
@@ -2090,25 +2642,26 @@ static int __init it87_find(unsigned short *address,
}
/* Check if fan3 is there or not */
- if (reg & (1 << 6))
- sio_data->skip_pwm |= (1 << 2);
- if (reg & (1 << 7))
- sio_data->skip_fan |= (1 << 2);
+ if (reg & BIT(6))
+ sio_data->skip_pwm |= BIT(2);
+ if (reg & BIT(7))
+ sio_data->skip_fan |= BIT(2);
/* Check if fan2 is there or not */
- reg = superio_inb(IT87_SIO_GPIO5_REG);
- if (reg & (1 << 1))
- sio_data->skip_pwm |= (1 << 1);
- if (reg & (1 << 2))
- sio_data->skip_fan |= (1 << 1);
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO5_REG);
+ if (reg & BIT(1))
+ sio_data->skip_pwm |= BIT(1);
+ if (reg & BIT(2))
+ sio_data->skip_fan |= BIT(1);
- if ((sio_data->type == it8718 || sio_data->type == it8720)
- && !(sio_data->skip_vid))
- sio_data->vid_value = superio_inb(IT87_SIO_VID_REG);
+ if ((sio_data->type == it8718 || sio_data->type == it8720) &&
+ !(sio_data->skip_vid))
+ sio_data->vid_value = superio_inb(sioaddr,
+ IT87_SIO_VID_REG);
- reg = superio_inb(IT87_SIO_PINX2_REG);
+ reg = superio_inb(sioaddr, IT87_SIO_PINX2_REG);
- uart6 = sio_data->type == it8782 && (reg & (1 << 2));
+ uart6 = sio_data->type == it8782 && (reg & BIT(2));
/*
* The IT8720F has no VIN7 pin, so VCCH should always be
@@ -2124,15 +2677,15 @@ static int __init it87_find(unsigned short *address,
* If UART6 is enabled, re-route VIN7 to the internal divider
* if that is not already the case.
*/
- if ((sio_data->type == it8720 || uart6) && !(reg & (1 << 1))) {
- reg |= (1 << 1);
- superio_outb(IT87_SIO_PINX2_REG, reg);
+ if ((sio_data->type == it8720 || uart6) && !(reg & BIT(1))) {
+ reg |= BIT(1);
+ superio_outb(sioaddr, IT87_SIO_PINX2_REG, reg);
pr_notice("Routing internal VCCH to in7\n");
}
- if (reg & (1 << 0))
- sio_data->internal |= (1 << 0);
- if (reg & (1 << 1))
- sio_data->internal |= (1 << 1);
+ if (reg & BIT(0))
+ sio_data->internal |= BIT(0);
+ if (reg & BIT(1))
+ sio_data->internal |= BIT(1);
/*
* On IT8782F, UART6 pins overlap with VIN5, VIN6, and VIN7.
@@ -2144,11 +2697,12 @@ static int __init it87_find(unsigned short *address,
* temperature source here, skip_temp is preliminary.
*/
if (uart6) {
- sio_data->skip_in |= (1 << 5) | (1 << 6);
- sio_data->skip_temp |= (1 << 2);
+ sio_data->skip_in |= BIT(5) | BIT(6);
+ sio_data->skip_temp |= BIT(2);
}
- sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+ sio_data->beep_pin = superio_inb(sioaddr,
+ IT87_SIO_BEEP_PIN_REG) & 0x3f;
}
if (sio_data->beep_pin)
pr_info("Beeping is supported\n");
@@ -2157,8 +2711,8 @@ static int __init it87_find(unsigned short *address,
board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
board_name = dmi_get_system_info(DMI_BOARD_NAME);
if (board_vendor && board_name) {
- if (strcmp(board_vendor, "nVIDIA") == 0
- && strcmp(board_name, "FN68PT") == 0) {
+ if (strcmp(board_vendor, "nVIDIA") == 0 &&
+ strcmp(board_name, "FN68PT") == 0) {
/*
* On the Shuttle SN68PT, FAN_CTL2 is apparently not
* connected to a fan, but to something else. One user
@@ -2168,373 +2722,15 @@ static int __init it87_find(unsigned short *address,
* the same board is ever used in other systems.
*/
pr_info("Disabling pwm2 due to hardware constraints\n");
- sio_data->skip_pwm = (1 << 1);
+ sio_data->skip_pwm = BIT(1);
}
}
exit:
- superio_exit();
+ superio_exit(sioaddr);
return err;
}
-static void it87_remove_files(struct device *dev)
-{
- struct it87_data *data = platform_get_drvdata(pdev);
- struct it87_sio_data *sio_data = dev_get_platdata(dev);
- int i;
-
- sysfs_remove_group(&dev->kobj, &it87_group);
- for (i = 0; i < 10; i++) {
- if (sio_data->skip_in & (1 << i))
- continue;
- sysfs_remove_group(&dev->kobj, &it87_group_in[i]);
- if (it87_attributes_in_beep[i])
- sysfs_remove_file(&dev->kobj,
- it87_attributes_in_beep[i]);
- }
- for (i = 0; i < 3; i++) {
- if (!(data->has_temp & (1 << i)))
- continue;
- sysfs_remove_group(&dev->kobj, &it87_group_temp[i]);
- if (has_temp_offset(data))
- sysfs_remove_file(&dev->kobj,
- it87_attributes_temp_offset[i]);
- if (sio_data->beep_pin)
- sysfs_remove_file(&dev->kobj,
- it87_attributes_temp_beep[i]);
- }
- for (i = 0; i < 6; i++) {
- if (!(data->has_fan & (1 << i)))
- continue;
- sysfs_remove_group(&dev->kobj, &it87_group_fan[i]);
- if (sio_data->beep_pin)
- sysfs_remove_file(&dev->kobj,
- it87_attributes_fan_beep[i]);
- if (i < 3 && !has_16bit_fans(data))
- sysfs_remove_file(&dev->kobj,
- it87_attributes_fan_div[i]);
- }
- for (i = 0; i < 3; i++) {
- if (sio_data->skip_pwm & (1 << i))
- continue;
- sysfs_remove_group(&dev->kobj, &it87_group_pwm[i]);
- if (has_old_autopwm(data))
- sysfs_remove_group(&dev->kobj,
- &it87_group_autopwm[i]);
- }
- if (!sio_data->skip_vid)
- sysfs_remove_group(&dev->kobj, &it87_group_vid);
- sysfs_remove_group(&dev->kobj, &it87_group_label);
-}
-
-static int it87_probe(struct platform_device *pdev)
-{
- struct it87_data *data;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct it87_sio_data *sio_data = dev_get_platdata(dev);
- int err = 0, i;
- int enable_pwm_interface;
- int fan_beep_need_rw;
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!devm_request_region(&pdev->dev, res->start, IT87_EC_EXTENT,
- DRVNAME)) {
- dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
- (unsigned long)res->start,
- (unsigned long)(res->start + IT87_EC_EXTENT - 1));
- return -EBUSY;
- }
-
- data = devm_kzalloc(&pdev->dev, sizeof(struct it87_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->addr = res->start;
- data->type = sio_data->type;
- data->features = it87_devices[sio_data->type].features;
- data->peci_mask = it87_devices[sio_data->type].peci_mask;
- data->old_peci_mask = it87_devices[sio_data->type].old_peci_mask;
- data->name = it87_devices[sio_data->type].name;
- /*
- * IT8705F Datasheet 0.4.1, 3h == Version G.
- * IT8712F Datasheet 0.9.1, section 8.3.5 indicates 8h == Version J.
- * These are the first revisions with 16-bit tachometer support.
- */
- switch (data->type) {
- case it87:
- if (sio_data->revision >= 0x03) {
- data->features &= ~FEAT_OLD_AUTOPWM;
- data->features |= FEAT_FAN16_CONFIG | FEAT_16BIT_FANS;
- }
- break;
- case it8712:
- if (sio_data->revision >= 0x08) {
- data->features &= ~FEAT_OLD_AUTOPWM;
- data->features |= FEAT_FAN16_CONFIG | FEAT_16BIT_FANS |
- FEAT_FIVE_FANS;
- }
- break;
- default:
- break;
- }
-
- /* Now, we do the remaining detection. */
- if ((it87_read_value(data, IT87_REG_CONFIG) & 0x80)
- || it87_read_value(data, IT87_REG_CHIPID) != 0x90)
- return -ENODEV;
-
- platform_set_drvdata(pdev, data);
-
- mutex_init(&data->update_lock);
-
- /* Check PWM configuration */
- enable_pwm_interface = it87_check_pwm(dev);
-
- /* Starting with IT8721F, we handle scaling of internal voltages */
- if (has_12mv_adc(data)) {
- if (sio_data->internal & (1 << 0))
- data->in_scaled |= (1 << 3); /* in3 is AVCC */
- if (sio_data->internal & (1 << 1))
- data->in_scaled |= (1 << 7); /* in7 is VSB */
- if (sio_data->internal & (1 << 2))
- data->in_scaled |= (1 << 8); /* in8 is Vbat */
- if (sio_data->internal & (1 << 3))
- data->in_scaled |= (1 << 9); /* in9 is AVCC */
- } else if (sio_data->type == it8781 || sio_data->type == it8782 ||
- sio_data->type == it8783) {
- if (sio_data->internal & (1 << 0))
- data->in_scaled |= (1 << 3); /* in3 is VCC5V */
- if (sio_data->internal & (1 << 1))
- data->in_scaled |= (1 << 7); /* in7 is VCCH5V */
- }
-
- data->has_temp = 0x07;
- if (sio_data->skip_temp & (1 << 2)) {
- if (sio_data->type == it8782
- && !(it87_read_value(data, IT87_REG_TEMP_EXTRA) & 0x80))
- data->has_temp &= ~(1 << 2);
- }
-
- /* Initialize the IT87 chip */
- it87_init_device(pdev);
-
- /* Register sysfs hooks */
- err = sysfs_create_group(&dev->kobj, &it87_group);
- if (err)
- return err;
-
- for (i = 0; i < 10; i++) {
- if (sio_data->skip_in & (1 << i))
- continue;
- err = sysfs_create_group(&dev->kobj, &it87_group_in[i]);
- if (err)
- goto error;
- if (sio_data->beep_pin && it87_attributes_in_beep[i]) {
- err = sysfs_create_file(&dev->kobj,
- it87_attributes_in_beep[i]);
- if (err)
- goto error;
- }
- }
-
- for (i = 0; i < 3; i++) {
- if (!(data->has_temp & (1 << i)))
- continue;
- err = sysfs_create_group(&dev->kobj, &it87_group_temp[i]);
- if (err)
- goto error;
- if (has_temp_offset(data)) {
- err = sysfs_create_file(&dev->kobj,
- it87_attributes_temp_offset[i]);
- if (err)
- goto error;
- }
- if (sio_data->beep_pin) {
- err = sysfs_create_file(&dev->kobj,
- it87_attributes_temp_beep[i]);
- if (err)
- goto error;
- }
- }
-
- /* Do not create fan files for disabled fans */
- fan_beep_need_rw = 1;
- for (i = 0; i < 6; i++) {
- if (!(data->has_fan & (1 << i)))
- continue;
- err = sysfs_create_group(&dev->kobj, &it87_group_fan[i]);
- if (err)
- goto error;
-
- if (i < 3 && !has_16bit_fans(data)) {
- err = sysfs_create_file(&dev->kobj,
- it87_attributes_fan_div[i]);
- if (err)
- goto error;
- }
-
- if (sio_data->beep_pin) {
- err = sysfs_create_file(&dev->kobj,
- it87_attributes_fan_beep[i]);
- if (err)
- goto error;
- if (!fan_beep_need_rw)
- continue;
-
- /*
- * As we have a single beep enable bit for all fans,
- * only the first enabled fan has a writable attribute
- * for it.
- */
- if (sysfs_chmod_file(&dev->kobj,
- it87_attributes_fan_beep[i],
- S_IRUGO | S_IWUSR))
- dev_dbg(dev, "chmod +w fan%d_beep failed\n",
- i + 1);
- fan_beep_need_rw = 0;
- }
- }
-
- if (enable_pwm_interface) {
- for (i = 0; i < 3; i++) {
- if (sio_data->skip_pwm & (1 << i))
- continue;
- err = sysfs_create_group(&dev->kobj,
- &it87_group_pwm[i]);
- if (err)
- goto error;
-
- if (!has_old_autopwm(data))
- continue;
- err = sysfs_create_group(&dev->kobj,
- &it87_group_autopwm[i]);
- if (err)
- goto error;
- }
- }
-
- if (!sio_data->skip_vid) {
- data->vrm = vid_which_vrm();
- /* VID reading from Super-I/O config space if available */
- data->vid = sio_data->vid_value;
- err = sysfs_create_group(&dev->kobj, &it87_group_vid);
- if (err)
- goto error;
- }
-
- /* Export labels for internal sensors */
- for (i = 0; i < 4; i++) {
- if (!(sio_data->internal & (1 << i)))
- continue;
- err = sysfs_create_file(&dev->kobj,
- it87_attributes_label[i]);
- if (err)
- goto error;
- }
-
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto error;
- }
-
- return 0;
-
-error:
- it87_remove_files(dev);
- return err;
-}
-
-static int it87_remove(struct platform_device *pdev)
-{
- struct it87_data *data = platform_get_drvdata(pdev);
-
- hwmon_device_unregister(data->hwmon_dev);
- it87_remove_files(&pdev->dev);
-
- return 0;
-}
-
-/*
- * Must be called with data->update_lock held, except during initialization.
- * We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
- * would slow down the IT87 access and should not be necessary.
- */
-static int it87_read_value(struct it87_data *data, u8 reg)
-{
- outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
- return inb_p(data->addr + IT87_DATA_REG_OFFSET);
-}
-
-/*
- * Must be called with data->update_lock held, except during initialization.
- * We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
- * would slow down the IT87 access and should not be necessary.
- */
-static void it87_write_value(struct it87_data *data, u8 reg, u8 value)
-{
- outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
- outb_p(value, data->addr + IT87_DATA_REG_OFFSET);
-}
-
-/* Return 1 if and only if the PWM interface is safe to use */
-static int it87_check_pwm(struct device *dev)
-{
- struct it87_data *data = dev_get_drvdata(dev);
- /*
- * Some BIOSes fail to correctly configure the IT87 fans. All fans off
- * and polarity set to active low is sign that this is the case so we
- * disable pwm control to protect the user.
- */
- int tmp = it87_read_value(data, IT87_REG_FAN_CTL);
- if ((tmp & 0x87) == 0) {
- if (fix_pwm_polarity) {
- /*
- * The user asks us to attempt a chip reconfiguration.
- * This means switching to active high polarity and
- * inverting all fan speed values.
- */
- int i;
- u8 pwm[3];
-
- for (i = 0; i < 3; i++)
- pwm[i] = it87_read_value(data,
- IT87_REG_PWM(i));
-
- /*
- * If any fan is in automatic pwm mode, the polarity
- * might be correct, as suspicious as it seems, so we
- * better don't change anything (but still disable the
- * PWM interface).
- */
- if (!((pwm[0] | pwm[1] | pwm[2]) & 0x80)) {
- dev_info(dev,
- "Reconfiguring PWM to active high polarity\n");
- it87_write_value(data, IT87_REG_FAN_CTL,
- tmp | 0x87);
- for (i = 0; i < 3; i++)
- it87_write_value(data,
- IT87_REG_PWM(i),
- 0x7f & ~pwm[i]);
- return 1;
- }
-
- dev_info(dev,
- "PWM configuration is too broken to be fixed\n");
- }
-
- dev_info(dev,
- "Detected broken BIOS defaults, disabling PWM interface\n");
- return 0;
- } else if (fix_pwm_polarity) {
- dev_info(dev,
- "PWM configuration looks sane, won't touch\n");
- }
-
- return 1;
-}
-
/* Called when we have found a new IT87. */
static void it87_init_device(struct platform_device *pdev)
{
@@ -2556,7 +2752,7 @@ static void it87_init_device(struct platform_device *pdev)
* these have separate registers for the temperature mapping and the
* manual duty cycle.
*/
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < NUM_AUTO_PWM; i++) {
data->pwm_temp_map[i] = i;
data->pwm_duty[i] = 0x7f; /* Full speed */
data->auto_pwm[i][3] = 0x7f; /* Full speed, hard-coded */
@@ -2569,12 +2765,12 @@ static void it87_init_device(struct platform_device *pdev)
* means -1 degree C, which surprisingly doesn't trigger an alarm,
* but is still confusing, so change to 127 degrees C.
*/
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < NUM_VIN_LIMIT; i++) {
tmp = it87_read_value(data, IT87_REG_VIN_MIN(i));
if (tmp == 0xff)
it87_write_value(data, IT87_REG_VIN_MIN(i), 0);
}
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < NUM_TEMP_LIMIT; i++) {
tmp = it87_read_value(data, IT87_REG_TEMP_HIGH(i));
if (tmp == 0xff)
it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127);
@@ -2619,158 +2815,245 @@ static void it87_init_device(struct platform_device *pdev)
/* Check for additional fans */
if (has_five_fans(data)) {
- if (tmp & (1 << 4))
- data->has_fan |= (1 << 3); /* fan4 enabled */
- if (tmp & (1 << 5))
- data->has_fan |= (1 << 4); /* fan5 enabled */
- if (has_six_fans(data) && (tmp & (1 << 2)))
- data->has_fan |= (1 << 5); /* fan6 enabled */
+ if (tmp & BIT(4))
+ data->has_fan |= BIT(3); /* fan4 enabled */
+ if (tmp & BIT(5))
+ data->has_fan |= BIT(4); /* fan5 enabled */
+ if (has_six_fans(data) && (tmp & BIT(2)))
+ data->has_fan |= BIT(5); /* fan6 enabled */
}
/* Fan input pins may be used for alternative functions */
data->has_fan &= ~sio_data->skip_fan;
+ /* Check if pwm5, pwm6 are enabled */
+ if (has_six_pwm(data)) {
+ /* The following code may be IT8620E specific */
+ tmp = it87_read_value(data, IT87_REG_FAN_DIV);
+ if ((tmp & 0xc0) == 0xc0)
+ sio_data->skip_pwm |= BIT(4);
+ if (!(tmp & BIT(3)))
+ sio_data->skip_pwm |= BIT(5);
+ }
+
/* Start monitoring */
it87_write_value(data, IT87_REG_CONFIG,
(it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
| (update_vbat ? 0x41 : 0x01));
}
-static void it87_update_pwm_ctrl(struct it87_data *data, int nr)
+/* Return 1 if and only if the PWM interface is safe to use */
+static int it87_check_pwm(struct device *dev)
{
- data->pwm_ctrl[nr] = it87_read_value(data, IT87_REG_PWM(nr));
- if (has_newer_autopwm(data)) {
- data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
- data->pwm_duty[nr] = it87_read_value(data,
- IT87_REG_PWM_DUTY(nr));
- } else {
- if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */
- data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
- else /* Manual mode */
- data->pwm_duty[nr] = data->pwm_ctrl[nr] & 0x7f;
- }
+ struct it87_data *data = dev_get_drvdata(dev);
+ /*
+ * Some BIOSes fail to correctly configure the IT87 fans. All fans off
+	 * and polarity set to active low is a sign that this is the case, so we
+ * disable pwm control to protect the user.
+ */
+ int tmp = it87_read_value(data, IT87_REG_FAN_CTL);
- if (has_old_autopwm(data)) {
- int i;
+ if ((tmp & 0x87) == 0) {
+ if (fix_pwm_polarity) {
+ /*
+ * The user asks us to attempt a chip reconfiguration.
+ * This means switching to active high polarity and
+ * inverting all fan speed values.
+ */
+ int i;
+ u8 pwm[3];
- for (i = 0; i < 5 ; i++)
- data->auto_temp[nr][i] = it87_read_value(data,
- IT87_REG_AUTO_TEMP(nr, i));
- for (i = 0; i < 3 ; i++)
- data->auto_pwm[nr][i] = it87_read_value(data,
- IT87_REG_AUTO_PWM(nr, i));
+ for (i = 0; i < ARRAY_SIZE(pwm); i++)
+ pwm[i] = it87_read_value(data,
+ IT87_REG_PWM[i]);
+
+ /*
+ * If any fan is in automatic pwm mode, the polarity
+ * might be correct, as suspicious as it seems, so we
+			 * had better not change anything (but still disable the
+ * PWM interface).
+ */
+ if (!((pwm[0] | pwm[1] | pwm[2]) & 0x80)) {
+ dev_info(dev,
+ "Reconfiguring PWM to active high polarity\n");
+ it87_write_value(data, IT87_REG_FAN_CTL,
+ tmp | 0x87);
+ for (i = 0; i < 3; i++)
+ it87_write_value(data,
+ IT87_REG_PWM[i],
+ 0x7f & ~pwm[i]);
+ return 1;
+ }
+
+ dev_info(dev,
+ "PWM configuration is too broken to be fixed\n");
+ }
+
+ dev_info(dev,
+ "Detected broken BIOS defaults, disabling PWM interface\n");
+ return 0;
+ } else if (fix_pwm_polarity) {
+ dev_info(dev,
+ "PWM configuration looks sane, won't touch\n");
}
+
+ return 1;
}
-static struct it87_data *it87_update_device(struct device *dev)
+static int it87_probe(struct platform_device *pdev)
{
- struct it87_data *data = dev_get_drvdata(dev);
- int i;
+ struct it87_data *data;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct it87_sio_data *sio_data = dev_get_platdata(dev);
+ int enable_pwm_interface;
+ struct device *hwmon_dev;
- mutex_lock(&data->update_lock);
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!devm_request_region(&pdev->dev, res->start, IT87_EC_EXTENT,
+ DRVNAME)) {
+ dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
+ (unsigned long)res->start,
+ (unsigned long)(res->start + IT87_EC_EXTENT - 1));
+ return -EBUSY;
+ }
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
- if (update_vbat) {
- /*
- * Cleared after each update, so reenable. Value
- * returned by this read will be previous value
- */
- it87_write_value(data, IT87_REG_CONFIG,
- it87_read_value(data, IT87_REG_CONFIG) | 0x40);
+ data = devm_kzalloc(&pdev->dev, sizeof(struct it87_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->addr = res->start;
+ data->type = sio_data->type;
+ data->features = it87_devices[sio_data->type].features;
+ data->peci_mask = it87_devices[sio_data->type].peci_mask;
+ data->old_peci_mask = it87_devices[sio_data->type].old_peci_mask;
+ /*
+ * IT8705F Datasheet 0.4.1, 3h == Version G.
+ * IT8712F Datasheet 0.9.1, section 8.3.5 indicates 8h == Version J.
+ * These are the first revisions with 16-bit tachometer support.
+ */
+ switch (data->type) {
+ case it87:
+ if (sio_data->revision >= 0x03) {
+ data->features &= ~FEAT_OLD_AUTOPWM;
+ data->features |= FEAT_FAN16_CONFIG | FEAT_16BIT_FANS;
}
- for (i = 0; i <= 7; i++) {
- data->in[i][0] =
- it87_read_value(data, IT87_REG_VIN(i));
- data->in[i][1] =
- it87_read_value(data, IT87_REG_VIN_MIN(i));
- data->in[i][2] =
- it87_read_value(data, IT87_REG_VIN_MAX(i));
+ break;
+ case it8712:
+ if (sio_data->revision >= 0x08) {
+ data->features &= ~FEAT_OLD_AUTOPWM;
+ data->features |= FEAT_FAN16_CONFIG | FEAT_16BIT_FANS |
+ FEAT_FIVE_FANS;
}
- /* in8 (battery) has no limit registers */
- data->in[8][0] = it87_read_value(data, IT87_REG_VIN(8));
- if (data->type == it8603)
- data->in[9][0] = it87_read_value(data, 0x2f);
+ break;
+ default:
+ break;
+ }
- for (i = 0; i < 6; i++) {
- /* Skip disabled fans */
- if (!(data->has_fan & (1 << i)))
- continue;
+ /* Now, we do the remaining detection. */
+ if ((it87_read_value(data, IT87_REG_CONFIG) & 0x80) ||
+ it87_read_value(data, IT87_REG_CHIPID) != 0x90)
+ return -ENODEV;
- data->fan[i][1] =
- it87_read_value(data, IT87_REG_FAN_MIN[i]);
- data->fan[i][0] = it87_read_value(data,
- IT87_REG_FAN[i]);
- /* Add high byte if in 16-bit mode */
- if (has_16bit_fans(data)) {
- data->fan[i][0] |= it87_read_value(data,
- IT87_REG_FANX[i]) << 8;
- data->fan[i][1] |= it87_read_value(data,
- IT87_REG_FANX_MIN[i]) << 8;
- }
- }
- for (i = 0; i < 3; i++) {
- if (!(data->has_temp & (1 << i)))
- continue;
- data->temp[i][0] =
- it87_read_value(data, IT87_REG_TEMP(i));
- data->temp[i][1] =
- it87_read_value(data, IT87_REG_TEMP_LOW(i));
- data->temp[i][2] =
- it87_read_value(data, IT87_REG_TEMP_HIGH(i));
- if (has_temp_offset(data))
- data->temp[i][3] =
- it87_read_value(data,
- IT87_REG_TEMP_OFFSET[i]);
- }
+ platform_set_drvdata(pdev, data);
- /* Newer chips don't have clock dividers */
- if ((data->has_fan & 0x07) && !has_16bit_fans(data)) {
- i = it87_read_value(data, IT87_REG_FAN_DIV);
- data->fan_div[0] = i & 0x07;
- data->fan_div[1] = (i >> 3) & 0x07;
- data->fan_div[2] = (i & 0x40) ? 3 : 1;
- }
+ mutex_init(&data->update_lock);
- data->alarms =
- it87_read_value(data, IT87_REG_ALARM1) |
- (it87_read_value(data, IT87_REG_ALARM2) << 8) |
- (it87_read_value(data, IT87_REG_ALARM3) << 16);
- data->beeps = it87_read_value(data, IT87_REG_BEEP_ENABLE);
+ /* Check PWM configuration */
+ enable_pwm_interface = it87_check_pwm(dev);
- data->fan_main_ctrl = it87_read_value(data,
- IT87_REG_FAN_MAIN_CTRL);
- data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL);
- for (i = 0; i < 3; i++)
- it87_update_pwm_ctrl(data, i);
+ /* Starting with IT8721F, we handle scaling of internal voltages */
+ if (has_12mv_adc(data)) {
+ if (sio_data->internal & BIT(0))
+ data->in_scaled |= BIT(3); /* in3 is AVCC */
+ if (sio_data->internal & BIT(1))
+ data->in_scaled |= BIT(7); /* in7 is VSB */
+ if (sio_data->internal & BIT(2))
+ data->in_scaled |= BIT(8); /* in8 is Vbat */
+ if (sio_data->internal & BIT(3))
+ data->in_scaled |= BIT(9); /* in9 is AVCC */
+ } else if (sio_data->type == it8781 || sio_data->type == it8782 ||
+ sio_data->type == it8783) {
+ if (sio_data->internal & BIT(0))
+ data->in_scaled |= BIT(3); /* in3 is VCC5V */
+ if (sio_data->internal & BIT(1))
+ data->in_scaled |= BIT(7); /* in7 is VCCH5V */
+ }
- data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE);
- data->extra = it87_read_value(data, IT87_REG_TEMP_EXTRA);
- /*
- * The IT8705F does not have VID capability.
- * The IT8718F and later don't use IT87_REG_VID for the
- * same purpose.
- */
- if (data->type == it8712 || data->type == it8716) {
- data->vid = it87_read_value(data, IT87_REG_VID);
- /*
- * The older IT8712F revisions had only 5 VID pins,
- * but we assume it is always safe to read 6 bits.
- */
- data->vid &= 0x3f;
- }
- data->last_updated = jiffies;
- data->valid = 1;
+ data->has_temp = 0x07;
+ if (sio_data->skip_temp & BIT(2)) {
+ if (sio_data->type == it8782 &&
+ !(it87_read_value(data, IT87_REG_TEMP_EXTRA) & 0x80))
+ data->has_temp &= ~BIT(2);
}
- mutex_unlock(&data->update_lock);
+ data->in_internal = sio_data->internal;
+ data->has_in = 0x3ff & ~sio_data->skip_in;
+
+ if (has_six_temp(data)) {
+ u8 reg = it87_read_value(data, IT87_REG_TEMP456_ENABLE);
+
+ /* Check for additional temperature sensors */
+ if ((reg & 0x03) >= 0x02)
+ data->has_temp |= BIT(3);
+ if (((reg >> 2) & 0x03) >= 0x02)
+ data->has_temp |= BIT(4);
+ if (((reg >> 4) & 0x03) >= 0x02)
+ data->has_temp |= BIT(5);
+
+ /* Check for additional voltage sensors */
+ if ((reg & 0x03) == 0x01)
+ data->has_in |= BIT(10);
+ if (((reg >> 2) & 0x03) == 0x01)
+ data->has_in |= BIT(11);
+ if (((reg >> 4) & 0x03) == 0x01)
+ data->has_in |= BIT(12);
+ }
- return data;
+ data->has_beep = !!sio_data->beep_pin;
+
+ /* Initialize the IT87 chip */
+ it87_init_device(pdev);
+
+ if (!sio_data->skip_vid) {
+ data->has_vid = true;
+ data->vrm = vid_which_vrm();
+ /* VID reading from Super-I/O config space if available */
+ data->vid = sio_data->vid_value;
+ }
+
+ /* Prepare for sysfs hooks */
+ data->groups[0] = &it87_group;
+ data->groups[1] = &it87_group_in;
+ data->groups[2] = &it87_group_temp;
+ data->groups[3] = &it87_group_fan;
+
+ if (enable_pwm_interface) {
+ data->has_pwm = BIT(ARRAY_SIZE(IT87_REG_PWM)) - 1;
+ data->has_pwm &= ~sio_data->skip_pwm;
+
+ data->groups[4] = &it87_group_pwm;
+ if (has_old_autopwm(data) || has_newer_autopwm(data))
+ data->groups[5] = &it87_group_auto_pwm;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev,
+ it87_devices[sio_data->type].name,
+ data, data->groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
-static int __init it87_device_add(unsigned short address,
+static struct platform_driver it87_driver = {
+ .driver = {
+ .name = DRVNAME,
+ },
+ .probe = it87_probe,
+};
+
+static int __init it87_device_add(int index, unsigned short address,
const struct it87_sio_data *sio_data)
{
+ struct platform_device *pdev;
struct resource res = {
.start = address + IT87_EC_OFFSET,
.end = address + IT87_EC_OFFSET + IT87_EC_EXTENT - 1,
@@ -2781,14 +3064,11 @@ static int __init it87_device_add(unsigned short address,
err = acpi_check_resource_conflict(&res);
if (err)
- goto exit;
+ return err;
pdev = platform_device_alloc(DRVNAME, address);
- if (!pdev) {
- err = -ENOMEM;
- pr_err("Device allocation failed\n");
- goto exit;
- }
+ if (!pdev)
+ return -ENOMEM;
err = platform_device_add_resources(pdev, &res, 1);
if (err) {
@@ -2809,44 +3089,61 @@ static int __init it87_device_add(unsigned short address,
goto exit_device_put;
}
+ it87_pdev[index] = pdev;
return 0;
exit_device_put:
platform_device_put(pdev);
-exit:
return err;
}
static int __init sm_it87_init(void)
{
- int err;
- unsigned short isa_address = 0;
+ int sioaddr[2] = { REG_2E, REG_4E };
struct it87_sio_data sio_data;
+ unsigned short isa_address;
+ bool found = false;
+ int i, err;
- memset(&sio_data, 0, sizeof(struct it87_sio_data));
- err = it87_find(&isa_address, &sio_data);
- if (err)
- return err;
err = platform_driver_register(&it87_driver);
if (err)
return err;
- err = it87_device_add(isa_address, &sio_data);
- if (err) {
- platform_driver_unregister(&it87_driver);
- return err;
+ for (i = 0; i < ARRAY_SIZE(sioaddr); i++) {
+ memset(&sio_data, 0, sizeof(struct it87_sio_data));
+ isa_address = 0;
+ err = it87_find(sioaddr[i], &isa_address, &sio_data);
+ if (err || isa_address == 0)
+ continue;
+
+ err = it87_device_add(i, isa_address, &sio_data);
+ if (err)
+ goto exit_dev_unregister;
+ found = true;
}
+ if (!found) {
+ err = -ENODEV;
+ goto exit_unregister;
+ }
return 0;
+
+exit_dev_unregister:
+ /* NULL check handled by platform_device_unregister */
+ platform_device_unregister(it87_pdev[0]);
+exit_unregister:
+ platform_driver_unregister(&it87_driver);
+ return err;
}
static void __exit sm_it87_exit(void)
{
- platform_device_unregister(pdev);
+ /* NULL check handled by platform_device_unregister */
+ platform_device_unregister(it87_pdev[1]);
+ platform_device_unregister(it87_pdev[0]);
platform_driver_unregister(&it87_driver);
}
-
MODULE_AUTHOR("Chris Gauthron, Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("IT8705F/IT871xF/IT872xF hardware monitoring driver");
module_param(update_vbat, bool, 0);
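
The hunks above thread an explicit sioaddr argument through the Super-I/O helpers so that both standard configuration ports (0x2e and 0x4e) can be probed in turn. As a point of reference, here is a minimal user-space-style sketch of the indexed access pattern such helpers wrap; the helper names and the LDN register value are illustrative assumptions, not the driver's actual code.

#include <sys/io.h>	/* outb()/inb(); user-space sketch only, requires ioperm() */

#define SIO_LDN_REG	0x07	/* logical device selector (assumed, conventional) */

static int sio_inb(int sioaddr, int reg)
{
	outb(reg, sioaddr);		/* index port: select the register */
	return inb(sioaddr + 1);	/* data port: read back its value  */
}

static void sio_select(int sioaddr, int ldn)
{
	outb(SIO_LDN_REG, sioaddr);	/* point the index at the LDN register     */
	outb(ldn, sioaddr + 1);		/* switch to the requested logical device  */
}
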
diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c
new file mode 100644
index 000000000000..30a100e70a0d
--- /dev/null
+++ b/drivers/hwmon/max31722.c
@@ -0,0 +1,165 @@
+/*
+ * max31722 - hwmon driver for Maxim Integrated MAX31722/MAX31723 SPI
+ * digital thermometers and thermostats.
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#define MAX31722_REG_CFG 0x00
+#define MAX31722_REG_TEMP_LSB 0x01
+
+#define MAX31722_MODE_CONTINUOUS 0x00
+#define MAX31722_MODE_STANDBY 0x01
+#define MAX31722_MODE_MASK 0xFE
+#define MAX31722_RESOLUTION_12BIT 0x06
+#define MAX31722_WRITE_MASK 0x80
+
+struct max31722_data {
+ struct device *hwmon_dev;
+ struct spi_device *spi_device;
+ u8 mode;
+};
+
+static int max31722_set_mode(struct max31722_data *data, u8 mode)
+{
+ int ret;
+ struct spi_device *spi = data->spi_device;
+ u8 buf[2] = {
+ MAX31722_REG_CFG | MAX31722_WRITE_MASK,
+ (data->mode & MAX31722_MODE_MASK) | mode
+ };
+
+ ret = spi_write(spi, &buf, sizeof(buf));
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to set sensor mode.\n");
+ return ret;
+ }
+ data->mode = (data->mode & MAX31722_MODE_MASK) | mode;
+
+ return 0;
+}
+
+static ssize_t max31722_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret;
+ struct max31722_data *data = dev_get_drvdata(dev);
+
+ ret = spi_w8r16(data->spi_device, MAX31722_REG_TEMP_LSB);
+ if (ret < 0)
+ return ret;
+ /* Keep 12 bits and multiply by the scale of 62.5 millidegrees/bit. */
+ return sprintf(buf, "%d\n", (s16)le16_to_cpu(ret) * 125 / 32);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ max31722_show_temp, NULL, 0);
+
+static struct attribute *max31722_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(max31722);
+
+static int max31722_probe(struct spi_device *spi)
+{
+ int ret;
+ struct max31722_data *data;
+
+ data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, data);
+ data->spi_device = spi;
+ /*
+ * Set SD bit to 0 so we can have continuous measurements.
+ * Set resolution to 12 bits for maximum precision.
+ */
+ data->mode = MAX31722_MODE_CONTINUOUS | MAX31722_RESOLUTION_12BIT;
+ ret = max31722_set_mode(data, MAX31722_MODE_CONTINUOUS);
+ if (ret < 0)
+ return ret;
+
+ data->hwmon_dev = hwmon_device_register_with_groups(&spi->dev,
+ spi->modalias,
+ data,
+ max31722_groups);
+ if (IS_ERR(data->hwmon_dev)) {
+ max31722_set_mode(data, MAX31722_MODE_STANDBY);
+ return PTR_ERR(data->hwmon_dev);
+ }
+
+ return 0;
+}
+
+static int max31722_remove(struct spi_device *spi)
+{
+ struct max31722_data *data = spi_get_drvdata(spi);
+
+ hwmon_device_unregister(data->hwmon_dev);
+
+ return max31722_set_mode(data, MAX31722_MODE_STANDBY);
+}
+
+static int __maybe_unused max31722_suspend(struct device *dev)
+{
+ struct spi_device *spi_device = to_spi_device(dev);
+ struct max31722_data *data = spi_get_drvdata(spi_device);
+
+ return max31722_set_mode(data, MAX31722_MODE_STANDBY);
+}
+
+static int __maybe_unused max31722_resume(struct device *dev)
+{
+ struct spi_device *spi_device = to_spi_device(dev);
+ struct max31722_data *data = spi_get_drvdata(spi_device);
+
+ return max31722_set_mode(data, MAX31722_MODE_CONTINUOUS);
+}
+
+static SIMPLE_DEV_PM_OPS(max31722_pm_ops, max31722_suspend, max31722_resume);
+
+static const struct spi_device_id max31722_spi_id[] = {
+ {"max31722", 0},
+ {"max31723", 0},
+ {}
+};
+
+static const struct acpi_device_id __maybe_unused max31722_acpi_id[] = {
+ {"MAX31722", 0},
+ {"MAX31723", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(spi, max31722_spi_id);
+
+static struct spi_driver max31722_driver = {
+ .driver = {
+ .name = "max31722",
+ .pm = &max31722_pm_ops,
+ .acpi_match_table = ACPI_PTR(max31722_acpi_id),
+ },
+ .probe = max31722_probe,
+ .remove = max31722_remove,
+ .id_table = max31722_spi_id,
+};
+
+module_spi_driver(max31722_driver);
+
+MODULE_AUTHOR("Tiberiu Breana <tiberiu.a.breana@intel.com>");
+MODULE_DESCRIPTION("max31722 sensor driver");
+MODULE_LICENSE("GPL v2");
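
As a worked example of the conversion in max31722_show_temp() above: the 16-bit raw value read over SPI is signed, with 1 LSB equal to 1/256 degree Celsius (the 12-bit result is left-justified), so millidegrees = raw * 1000 / 256 = raw * 125 / 32. A standalone sketch with assumed names:

#include <stdint.h>
#include <stdio.h>

static int raw_to_millidegrees(int16_t raw)
{
	return (int)raw * 125 / 32;	/* 1000/256 reduced to lowest terms */
}

int main(void)
{
	printf("%d\n", raw_to_millidegrees(0x1900));		/* +25.0 C -> 25000 */
	printf("%d\n", raw_to_millidegrees((int16_t)0xff00));	/*  -1.0 C -> -1000 */
	return 0;
}
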
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 131a2815dbda..d24d7b6047f2 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -449,7 +449,7 @@ static int sch5636_probe(struct platform_device *pdev)
}
revision[i] = val;
}
- pr_info("Found %s chip at %#hx, revison: %d.%02d\n", DEVNAME,
+ pr_info("Found %s chip at %#hx, revision: %d.%02d\n", DEVNAME,
data->addr, revision[0], revision[1]);
/* Read all temp + fan ctrl registers to determine which are active */
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 37a8a907febe..05dbcce70b0e 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -522,7 +522,7 @@ static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
static void update_flush(ide_drive_t *drive)
{
u16 *id = drive->id;
- unsigned flush = 0;
+ bool wc = false;
if (drive->dev_flags & IDE_DFLAG_WCACHE) {
unsigned long long capacity;
@@ -546,12 +546,12 @@ static void update_flush(ide_drive_t *drive)
drive->name, barrier ? "" : "not ");
if (barrier) {
- flush = REQ_FLUSH;
+ wc = true;
blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
}
}
- blk_queue_flush(drive->queue, flush);
+ blk_queue_write_cache(drive->queue, wc, false);
}
ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index c6935de425fa..c96649292b55 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -766,6 +766,67 @@ static struct cpuidle_state knl_cstates[] = {
.enter = NULL }
};
+static struct cpuidle_state bxt_cstates[] = {
+ {
+ .name = "C1-BXT",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C1E-BXT",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C6-BXT",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 133,
+ .target_residency = 133,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C7s-BXT",
+ .desc = "MWAIT 0x31",
+ .flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 155,
+ .target_residency = 155,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C8-BXT",
+ .desc = "MWAIT 0x40",
+ .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 1000,
+ .target_residency = 1000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C9-BXT",
+ .desc = "MWAIT 0x50",
+ .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 2000,
+ .target_residency = 2000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C10-BXT",
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 10000,
+ .target_residency = 10000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .enter = NULL }
+};
+
/**
* intel_idle
* @dev: cpuidle_device
@@ -950,6 +1011,11 @@ static const struct idle_cpu idle_cpu_knl = {
.state_table = knl_cstates,
};
+static const struct idle_cpu idle_cpu_bxt = {
+ .state_table = bxt_cstates,
+ .disable_promotion_to_c1e = true,
+};
+
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
@@ -985,6 +1051,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(0x9e, idle_cpu_skl),
ICPU(0x55, idle_cpu_skx),
ICPU(0x57, idle_cpu_knl),
+ ICPU(0x5c, idle_cpu_bxt),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -1075,6 +1142,73 @@ static void ivt_idle_state_table_update(void)
/* else, 1 and 2 socket systems use default ivt_cstates */
}
+
+/*
+ * Translate IRTL (Interrupt Response Time Limit) MSR to usec
+ */
+
+static unsigned int irtl_ns_units[] = {
+ 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
+
+static unsigned long long irtl_2_usec(unsigned long long irtl)
+{
+ unsigned long long ns;
+
+ ns = irtl_ns_units[(irtl >> 10) & 0x3];
+
+ return div64_u64((irtl & 0x3FF) * ns, 1000);
+}
+/*
+ * bxt_idle_state_table_update(void)
+ *
+ * On BXT, we trust the IRTL to show the definitive maximum latency.
+ * We use the same value for target_residency.
+ */
+static void bxt_idle_state_table_update(void)
+{
+ unsigned long long msr;
+
+ rdmsrl(MSR_PKGC6_IRTL, msr);
+ if (msr) {
+ unsigned int usec = irtl_2_usec(msr);
+
+ bxt_cstates[2].exit_latency = usec;
+ bxt_cstates[2].target_residency = usec;
+ }
+
+ rdmsrl(MSR_PKGC7_IRTL, msr);
+ if (msr) {
+ unsigned int usec = irtl_2_usec(msr);
+
+ bxt_cstates[3].exit_latency = usec;
+ bxt_cstates[3].target_residency = usec;
+ }
+
+ rdmsrl(MSR_PKGC8_IRTL, msr);
+ if (msr) {
+ unsigned int usec = irtl_2_usec(msr);
+
+ bxt_cstates[4].exit_latency = usec;
+ bxt_cstates[4].target_residency = usec;
+ }
+
+ rdmsrl(MSR_PKGC9_IRTL, msr);
+ if (msr) {
+ unsigned int usec = irtl_2_usec(msr);
+
+ bxt_cstates[5].exit_latency = usec;
+ bxt_cstates[5].target_residency = usec;
+ }
+
+ rdmsrl(MSR_PKGC10_IRTL, msr);
+ if (msr) {
+ unsigned int usec = irtl_2_usec(msr);
+
+ bxt_cstates[6].exit_latency = usec;
+ bxt_cstates[6].target_residency = usec;
+ }
+
+}
/*
* sklh_idle_state_table_update(void)
*
@@ -1130,6 +1264,9 @@ static void intel_idle_state_table_update(void)
case 0x3e: /* IVT */
ivt_idle_state_table_update();
break;
+ case 0x5c: /* BXT */
+ bxt_idle_state_table_update();
+ break;
case 0x5e: /* SKL-H */
sklh_idle_state_table_update();
break;
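
To make the IRTL translation above concrete: the low 10 bits of each MSR hold a count, and the field above them selects an entry of irtl_ns_units[], so usec = count * unit_ns / 1000. The sketch below assumes the full 3-bit unit field implied by the eight-entry table (the hunk itself masks with 0x3); treat the mask width as an assumption rather than a statement about the hardware.

#include <stdint.h>
#include <stdio.h>

static const unsigned int irtl_ns_units[] = {
	1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };

static uint64_t irtl_to_usec(uint64_t irtl)
{
	uint64_t unit_ns = irtl_ns_units[(irtl >> 10) & 0x7];

	return (irtl & 0x3ff) * unit_ns / 1000;
}

int main(void)
{
	/* unit index 2 (1024 ns) with a count of 256 -> 262 usec */
	printf("%llu\n", (unsigned long long)irtl_to_usec((2ULL << 10) | 256));
	return 0;
}
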
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 6f8b084e13d0..3d8ff09eba57 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -143,9 +143,9 @@ struct analog_port {
#include <linux/i8253.h>
-#define GET_TIME(x) do { if (cpu_has_tsc) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
-#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
-#define TIME_NAME (cpu_has_tsc?"TSC":"PIT")
+#define GET_TIME(x) do { if (boot_cpu_has(X86_FEATURE_TSC)) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
+#define DELTA(x,y) (boot_cpu_has(X86_FEATURE_TSC) ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
+#define TIME_NAME (boot_cpu_has(X86_FEATURE_TSC)?"TSC":"PIT")
static unsigned int get_time_pit(void)
{
unsigned long flags;
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 8adaaeae3268..49721b4e1975 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -36,7 +36,7 @@ static void irq_remapping_disable_io_apic(void)
* As this gets called during crash dump, keep this simple for
* now.
*/
- if (cpu_has_apic || apic_from_smp_config())
+ if (boot_cpu_has(X86_FEATURE_APIC) || apic_from_smp_config())
disconnect_bsp_APIC(0);
}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 3e124793e224..81f88ada3a61 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -27,6 +27,7 @@ config ARM_GIC_V3
select IRQ_DOMAIN
select MULTI_IRQ_HANDLER
select IRQ_DOMAIN_HIERARCHY
+ select PARTITION_PERCPU
config ARM_GIC_V3_ITS
bool
@@ -244,3 +245,11 @@ config IRQ_MXS
config MVEBU_ODMI
bool
select GENERIC_MSI_IRQ_DOMAIN
+
+config LS_SCFG_MSI
+ def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
+ depends on PCI && PCI_MSI
+ select PCI_MSI_IRQ_DOMAIN
+
+config PARTITION_PERCPU
+ bool
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index b03cfcbbac6b..f828244b44c2 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
+obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o
obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
obj-$(CONFIG_IRQ_MXS) += irq-mxs.o
obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o
@@ -27,6 +28,7 @@ obj-$(CONFIG_REALVIEW_DT) += irq-gic-realview.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
+obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
obj-$(CONFIG_ARM_VIC) += irq-vic.o
@@ -65,3 +67,4 @@ obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o
obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o
obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o
obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
+obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
index 25384255b30f..63d980995d17 100644
--- a/drivers/irqchip/irq-alpine-msi.c
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <asm/irq.h>
-#include <asm-generic/msi.h>
+#include <asm/msi.h>
/* MSIX message address format: local GIC target */
#define ALPINE_MSIX_SPI_TARGET_CLUSTER0 BIT(16)
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index b6e950d4782a..72ff1d5c5de6 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -195,7 +195,7 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
* Ensure that stores to normal memory are visible to the
* other CPUs before issuing the IPI.
*/
- dsb();
+ smp_wmb();
for_each_cpu(cpu, mask) {
writel(1 << ipi, mailbox0_base + 16 * cpu);
@@ -223,6 +223,7 @@ static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
.priority = 100,
};
+#ifdef CONFIG_ARM
int __init bcm2836_smp_boot_secondary(unsigned int cpu,
struct task_struct *idle)
{
@@ -238,7 +239,7 @@ int __init bcm2836_smp_boot_secondary(unsigned int cpu,
static const struct smp_operations bcm2836_smp_ops __initconst = {
.smp_boot_secondary = bcm2836_smp_boot_secondary,
};
-
+#endif
#endif
static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
@@ -252,12 +253,15 @@ bcm2836_arm_irqchip_smp_init(void)
/* Unmask IPIs to the boot CPU. */
bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier,
CPU_STARTING,
- (void *)smp_processor_id());
+ (void *)(uintptr_t)smp_processor_id());
register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier);
set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
+
+#ifdef CONFIG_ARM
smp_set_ops(&bcm2836_smp_ops);
#endif
+#endif
}
/*
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 75573fa431ba..1eef56a89b1f 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -183,7 +183,7 @@ static int crossbar_domain_translate(struct irq_domain *d,
return -EINVAL;
*hwirq = fwspec->param[1];
- *type = fwspec->param[2];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
return 0;
}
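
The one-line change above matters because the third cell of a devicetree interrupt specifier may carry flags beyond the trigger type, and only the low four sense bits should be reported back to the core. A minimal sketch; the 0xf value mirrors mainline's IRQ_TYPE_SENSE_MASK but is restated here as an assumption:

#define IRQ_TYPE_SENSE_MASK	0xf	/* assumed value of the kernel constant */

static unsigned int cell_to_irq_type(unsigned int cell)
{
	return cell & IRQ_TYPE_SENSE_MASK;	/* e.g. 0x104 -> 0x4 (level high) */
}
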
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index f174ce0ca361..97c0028e8388 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -50,14 +50,26 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
else if (type & IRQ_TYPE_EDGE_BOTH)
val |= confmask;
+ /* If the current configuration is the same, then we are done */
+ if (val == oldval)
+ return 0;
+
/*
* Write back the new configuration, and possibly re-enable
- * the interrupt. If we tried to write a new configuration and failed,
- * return an error.
+ * the interrupt. If we fail to write a new configuration for
+ * an SPI then WARN and return an error. If we fail to write the
+	 * configuration for a PPI, this is most likely because the GIC
+ * does not allow us to set the configuration or we are in a
+ * non-secure mode, and hence it may not be catastrophic.
*/
writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
- if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val && val != oldval)
- ret = -EINVAL;
+ if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val) {
+ if (WARN_ON(irq >= 32))
+ ret = -EINVAL;
+ else
+ pr_warn("GIC: PPI%d is secure or misconfigured\n",
+ irq - 16);
+ }
if (sync_access)
sync_access();
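
The gic_configure_irq() rework above reduces to a write-then-verify pattern with two severities: an unchanged value is skipped, a readback mismatch is an error only for SPIs (irq >= 32), and for PPIs it is merely logged. A simplified, driver-agnostic sketch of that decision, not the GIC code itself:

static int write_and_verify_cfg(volatile unsigned int *cfg, unsigned int val,
				unsigned int irq)
{
	if (*cfg == val)
		return 0;			/* already configured, nothing to write */

	*cfg = val;
	if (*cfg != val)			/* readback did not take */
		return irq >= 32 ? -1 : 0;	/* fatal for SPIs, tolerated for PPIs */

	return 0;
}
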
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 28f047c61baa..ad0d2960b664 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -49,6 +49,9 @@
/* APM X-Gene with GICv2m MSI_IIDR register value */
#define XGENE_GICV2M_MSI_IIDR 0x06000170
+/* Broadcom NS2 GICv2m MSI_IIDR register value */
+#define BCM_NS2_GICV2M_MSI_IIDR 0x0000013f
+
/* List of flags for specific v2m implementation */
#define GICV2M_NEEDS_SPI_OFFSET 0x00000001
@@ -62,6 +65,7 @@ struct v2m_data {
void __iomem *base; /* GICv2m virt address */
u32 spi_start; /* The SPI number that MSIs start */
u32 nr_spis; /* The number of SPIs for MSIs */
+ u32 spi_offset; /* offset to be subtracted from SPI number */
unsigned long *bm; /* MSI vector bitmap */
u32 flags; /* v2m flags for specific implementation */
};
@@ -102,7 +106,7 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = data->hwirq;
if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
- msg->data -= v2m->spi_start;
+ msg->data -= v2m->spi_offset;
}
static struct irq_chip gicv2m_irq_chip = {
@@ -340,9 +344,20 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
* different from the standard GICv2m implementation where
* the MSI data is the absolute value within the range from
* spi_start to (spi_start + num_spis).
+ *
+	 * The Broadcom NS2 GICv2m implementation has an erratum where the MSI data
+ * is 'spi_number - 32'
*/
- if (readl_relaxed(v2m->base + V2M_MSI_IIDR) == XGENE_GICV2M_MSI_IIDR)
+ switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
+ case XGENE_GICV2M_MSI_IIDR:
+ v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
+ v2m->spi_offset = v2m->spi_start;
+ break;
+ case BCM_NS2_GICV2M_MSI_IIDR:
v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
+ v2m->spi_offset = 32;
+ break;
+ }
v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
GFP_KERNEL);
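
The new spi_offset field above generalises the MSI payload fixup: standard GICv2m frames program the absolute SPI number, X-Gene subtracts the frame's first SPI, and the NS2 erratum always subtracts 32. A small sketch of that computation with illustrative names:

static unsigned int v2m_msi_data(unsigned int hwirq, int needs_offset,
				 unsigned int spi_offset)
{
	unsigned int data = hwirq;	/* default: absolute SPI number */

	if (needs_offset)
		data -= spi_offset;	/* spi_start on X-Gene, 32 on NS2 */

	return data;
}
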
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 39261798c59f..6bd881be24ea 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -55,6 +55,16 @@ struct its_collection {
};
/*
+ * The ITS_BASER structure - contains memory information and cached
+ * value of BASER register configuration.
+ */
+struct its_baser {
+ void *base;
+ u64 val;
+ u32 order;
+};
+
+/*
* The ITS structure - contains most of the infrastructure, with the
* top-level MSI domain, the command queue, the collections, and the
* list of devices writing to it.
@@ -66,14 +76,12 @@ struct its_node {
unsigned long phys_base;
struct its_cmd_block *cmd_base;
struct its_cmd_block *cmd_write;
- struct {
- void *base;
- u32 order;
- } tables[GITS_BASER_NR_REGS];
+ struct its_baser tables[GITS_BASER_NR_REGS];
struct its_collection *collections;
struct list_head its_device_list;
u64 flags;
u32 ite_size;
+ u32 device_ids;
};
#define ITS_ITT_ALIGN SZ_256
@@ -838,6 +846,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
ids = GITS_TYPER_DEVBITS(typer);
}
+ its->device_ids = ids;
+
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
u64 type = GITS_BASER_TYPE(val);
@@ -913,6 +923,7 @@ retry_baser:
}
val |= alloc_pages - 1;
+ its->tables[i].val = val;
writeq_relaxed(val, its->base + GITS_BASER + i * 8);
tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -1138,9 +1149,22 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
return its_dev;
}
+static struct its_baser *its_get_baser(struct its_node *its, u32 type)
+{
+ int i;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ if (GITS_BASER_TYPE(its->tables[i].val) == type)
+ return &its->tables[i];
+ }
+
+ return NULL;
+}
+
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
int nvecs)
{
+ struct its_baser *baser;
struct its_device *dev;
unsigned long *lpi_map;
unsigned long flags;
@@ -1151,6 +1175,16 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
int nr_ites;
int sz;
+ baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
+
+ /* Don't allow 'dev_id' that exceeds single, flat table limit */
+ if (baser) {
+ if (dev_id >= (PAGE_ORDER_TO_SIZE(baser->order) /
+ GITS_BASER_ENTRY_SIZE(baser->val)))
+ return NULL;
+ } else if (ilog2(dev_id) >= its->device_ids)
+ return NULL;
+
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
/*
* At least one bit of EventID is being used, hence a minimum
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 5b7d3c2129d8..1a1ea4f733c1 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -29,6 +29,7 @@
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqchip/irq-partition-percpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
@@ -44,6 +45,7 @@ struct redist_region {
};
struct gic_chip_data {
+ struct fwnode_handle *fwnode;
void __iomem *dist_base;
struct redist_region *redist_regions;
struct rdists rdists;
@@ -51,6 +53,7 @@ struct gic_chip_data {
u64 redist_stride;
u32 nr_redist_regions;
unsigned int irq_nr;
+ struct partition_desc *ppi_descs[16];
};
static struct gic_chip_data gic_data __read_mostly;
@@ -364,6 +367,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
if (static_key_true(&supports_deactivate))
gic_write_dir(irqnr);
#ifdef CONFIG_SMP
+ /*
+ * Unlike GICv2, we don't need an smp_rmb() here.
+ * The control dependency from gic_read_iar to
+ * the ISB in gic_write_eoir is enough to ensure
+ * that any shared data read by handle_IPI will
+ * be read after the ACK.
+ */
handle_IPI(irqnr, regs);
#else
WARN_ONCE(true, "Unexpected SGI received!\n");
@@ -383,6 +393,15 @@ static void __init gic_dist_init(void)
writel_relaxed(0, base + GICD_CTLR);
gic_dist_wait_for_rwp();
+ /*
+ * Configure SPIs as non-secure Group-1. This will only matter
+ * if the GIC only has a single security state. This will not
+ * do the right thing if the kernel is running in secure mode,
+ * but that's not the intended use case anyway.
+ */
+ for (i = 32; i < gic_data.irq_nr; i += 32)
+ writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
+
gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
/* Enable distributor with ARE, Group1 */
@@ -500,6 +519,9 @@ static void gic_cpu_init(void)
rbase = gic_data_rdist_sgi_base();
+ /* Configure SGIs/PPIs as non-secure Group-1 */
+ writel_relaxed(~0, rbase + GICR_IGROUPR0);
+
gic_cpu_config(rbase, gic_redist_wait_for_rwp);
/* Give LPIs a spin */
@@ -812,10 +834,62 @@ static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
}
}
+static int gic_irq_domain_select(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ /* Not for us */
+ if (fwspec->fwnode != d->fwnode)
+ return 0;
+
+ /* If this is not DT, then we have a single domain */
+ if (!is_of_node(fwspec->fwnode))
+ return 1;
+
+ /*
+ * If this is a PPI and we have a 4th (non-null) parameter,
+ * then we need to match the partition domain.
+ */
+ if (fwspec->param_count >= 4 &&
+ fwspec->param[0] == 1 && fwspec->param[3] != 0)
+ return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
+
+ return d == gic_data.domain;
+}
+
static const struct irq_domain_ops gic_irq_domain_ops = {
.translate = gic_irq_domain_translate,
.alloc = gic_irq_domain_alloc,
.free = gic_irq_domain_free,
+ .select = gic_irq_domain_select,
+};
+
+static int partition_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct device_node *np;
+ int ret;
+
+ np = of_find_node_by_phandle(fwspec->param[3]);
+ if (WARN_ON(!np))
+ return -EINVAL;
+
+ ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
+ of_node_to_fwnode(np));
+ if (ret < 0)
+ return ret;
+
+ *hwirq = ret;
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static const struct irq_domain_ops partition_domain_ops = {
+ .translate = partition_domain_translate,
+ .select = gic_irq_domain_select,
};
static void gicv3_enable_quirks(void)
@@ -843,6 +917,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
if (static_key_true(&supports_deactivate))
pr_info("GIC: Using split EOI/Deactivate mode\n");
+ gic_data.fwnode = handle;
gic_data.dist_base = dist_base;
gic_data.redist_regions = rdist_regs;
gic_data.nr_redist_regions = nr_redist_regions;
@@ -901,6 +976,119 @@ static int __init gic_validate_dist_version(void __iomem *dist_base)
return 0;
}
+static int get_cpu_number(struct device_node *dn)
+{
+ const __be32 *cell;
+ u64 hwid;
+ int i;
+
+ cell = of_get_property(dn, "reg", NULL);
+ if (!cell)
+ return -1;
+
+ hwid = of_read_number(cell, of_n_addr_cells(dn));
+
+ /*
+ * Non affinity bits must be set to 0 in the DT
+ */
+ if (hwid & ~MPIDR_HWID_BITMASK)
+ return -1;
+
+ for (i = 0; i < num_possible_cpus(); i++)
+ if (cpu_logical_map(i) == hwid)
+ return i;
+
+ return -1;
+}
+
+/* Create all possible partitions at boot time */
+static void gic_populate_ppi_partitions(struct device_node *gic_node)
+{
+ struct device_node *parts_node, *child_part;
+ int part_idx = 0, i;
+ int nr_parts;
+ struct partition_affinity *parts;
+
+ parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
+ if (!parts_node)
+ return;
+
+ nr_parts = of_get_child_count(parts_node);
+
+ if (!nr_parts)
+ return;
+
+ parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
+ if (WARN_ON(!parts))
+ return;
+
+ for_each_child_of_node(parts_node, child_part) {
+ struct partition_affinity *part;
+ int n;
+
+ part = &parts[part_idx];
+
+ part->partition_id = of_node_to_fwnode(child_part);
+
+ pr_info("GIC: PPI partition %s[%d] { ",
+ child_part->name, part_idx);
+
+ n = of_property_count_elems_of_size(child_part, "affinity",
+ sizeof(u32));
+ WARN_ON(n <= 0);
+
+ for (i = 0; i < n; i++) {
+ int err, cpu;
+ u32 cpu_phandle;
+ struct device_node *cpu_node;
+
+ err = of_property_read_u32_index(child_part, "affinity",
+ i, &cpu_phandle);
+ if (WARN_ON(err))
+ continue;
+
+ cpu_node = of_find_node_by_phandle(cpu_phandle);
+ if (WARN_ON(!cpu_node))
+ continue;
+
+ cpu = get_cpu_number(cpu_node);
+ if (WARN_ON(cpu == -1))
+ continue;
+
+ pr_cont("%s[%d] ", cpu_node->full_name, cpu);
+
+ cpumask_set_cpu(cpu, &part->mask);
+ }
+
+ pr_cont("}\n");
+ part_idx++;
+ }
+
+ for (i = 0; i < 16; i++) {
+ unsigned int irq;
+ struct partition_desc *desc;
+ struct irq_fwspec ppi_fwspec = {
+ .fwnode = gic_data.fwnode,
+ .param_count = 3,
+ .param = {
+ [0] = 1,
+ [1] = i,
+ [2] = IRQ_TYPE_NONE,
+ },
+ };
+
+ irq = irq_create_fwspec_mapping(&ppi_fwspec);
+ if (WARN_ON(!irq))
+ continue;
+ desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
+ irq, &partition_domain_ops);
+ if (WARN_ON(!desc))
+ continue;
+
+ gic_data.ppi_descs[i] = desc;
+ }
+}
+
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *dist_base;
@@ -952,8 +1140,11 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
redist_stride, &node->fwnode);
- if (!err)
- return 0;
+ if (err)
+ goto out_unmap_rdist;
+
+ gic_populate_ppi_partitions(node);
+ return 0;
out_unmap_rdist:
for (i = 0; i < nr_redist_regions; i++)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 282344b95ec2..1de20e14a721 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -55,7 +55,7 @@
static void gic_check_cpu_features(void)
{
- WARN_TAINT_ONCE(cpus_have_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
+ WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
TAINT_CPU_OUT_OF_SPEC,
"GICv3 system registers enabled, broken firmware!\n");
}
@@ -72,6 +72,9 @@ struct gic_chip_data {
struct irq_chip chip;
union gic_base dist_base;
union gic_base cpu_base;
+ void __iomem *raw_dist_base;
+ void __iomem *raw_cpu_base;
+ u32 percpu_offset;
#ifdef CONFIG_CPU_PM
u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
@@ -344,6 +347,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
if (static_key_true(&supports_deactivate))
writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
#ifdef CONFIG_SMP
+ /*
+ * Ensure any shared data written by the CPU sending
+ * the IPI is read after we've read the ACK register
+ * on the GIC.
+ *
+ * Pairs with the write barrier in gic_raise_softirq
+ */
+ smp_rmb();
handle_IPI(irqnr, regs);
#endif
continue;
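To make the pairing explicit, here is a hedged sketch of the sending side; it is not the driver's actual gic_raise_softirq(), only the shape of the write ordering that the smp_rmb() above relies on.

#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>

static void example_raise_ipi(void __iomem *dist_base, unsigned long map,
			      unsigned int ipinr)
{
	/* 1. Fill in whatever handle_IPI() on the target CPUs will read. */

	/* 2. Order that data before the SGI trigger; pairs with smp_rmb(). */
	smp_wmb();

	/* 3. Kick the target CPUs via the software-generated interrupt. */
	writel_relaxed(map << 16 | ipinr, dist_base + GIC_DIST_SOFTINT);
}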
@@ -391,20 +402,6 @@ static struct irq_chip gic_chip = {
IRQCHIP_MASK_ON_SUSPEND,
};
-static struct irq_chip gic_eoimode1_chip = {
- .name = "GICv2",
- .irq_mask = gic_eoimode1_mask_irq,
- .irq_unmask = gic_unmask_irq,
- .irq_eoi = gic_eoimode1_eoi_irq,
- .irq_set_type = gic_set_type,
- .irq_get_irqchip_state = gic_irq_get_irqchip_state,
- .irq_set_irqchip_state = gic_irq_set_irqchip_state,
- .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
- .flags = IRQCHIP_SET_TYPE_MASKED |
- IRQCHIP_SKIP_SET_WAKE |
- IRQCHIP_MASK_ON_SUSPEND,
-};
-
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
@@ -473,7 +470,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}
-static void gic_cpu_init(struct gic_chip_data *gic)
+static int gic_cpu_init(struct gic_chip_data *gic)
{
void __iomem *dist_base = gic_data_dist_base(gic);
void __iomem *base = gic_data_cpu_base(gic);
@@ -489,7 +486,10 @@ static void gic_cpu_init(struct gic_chip_data *gic)
/*
* Get what the GIC says our CPU mask is.
*/
- BUG_ON(cpu >= NR_GIC_CPU_IF);
+ if (WARN_ON(cpu >= NR_GIC_CPU_IF))
+ return -EINVAL;
+
+ gic_check_cpu_features();
cpu_mask = gic_get_cpumask(gic);
gic_cpu_map[cpu] = cpu_mask;
@@ -506,6 +506,8 @@ static void gic_cpu_init(struct gic_chip_data *gic)
writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
gic_cpu_if_up(gic);
+
+ return 0;
}
int gic_cpu_if_down(unsigned int gic_nr)
@@ -531,34 +533,35 @@ int gic_cpu_if_down(unsigned int gic_nr)
* this function, no interrupts will be delivered by the GIC, and another
* platform-specific wakeup source must be enabled.
*/
-static void gic_dist_save(unsigned int gic_nr)
+static void gic_dist_save(struct gic_chip_data *gic)
{
unsigned int gic_irqs;
void __iomem *dist_base;
int i;
- BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+ if (WARN_ON(!gic))
+ return;
- gic_irqs = gic_data[gic_nr].gic_irqs;
- dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ gic_irqs = gic->gic_irqs;
+ dist_base = gic_data_dist_base(gic);
if (!dist_base)
return;
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
- gic_data[gic_nr].saved_spi_conf[i] =
+ gic->saved_spi_conf[i] =
readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
- gic_data[gic_nr].saved_spi_target[i] =
+ gic->saved_spi_target[i] =
readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
- gic_data[gic_nr].saved_spi_enable[i] =
+ gic->saved_spi_enable[i] =
readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
- gic_data[gic_nr].saved_spi_active[i] =
+ gic->saved_spi_active[i] =
readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}
@@ -569,16 +572,17 @@ static void gic_dist_save(unsigned int gic_nr)
* handled normally, but any edge interrupts that occurred will not be seen by
* the GIC and need to be handled by the platform-specific wakeup source.
*/
-static void gic_dist_restore(unsigned int gic_nr)
+static void gic_dist_restore(struct gic_chip_data *gic)
{
unsigned int gic_irqs;
unsigned int i;
void __iomem *dist_base;
- BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+ if (WARN_ON(!gic))
+ return;
- gic_irqs = gic_data[gic_nr].gic_irqs;
- dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ gic_irqs = gic->gic_irqs;
+ dist_base = gic_data_dist_base(gic);
if (!dist_base)
return;
@@ -586,7 +590,7 @@ static void gic_dist_restore(unsigned int gic_nr)
writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
- writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
+ writel_relaxed(gic->saved_spi_conf[i],
dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
@@ -594,85 +598,87 @@ static void gic_dist_restore(unsigned int gic_nr)
dist_base + GIC_DIST_PRI + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
- writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
+ writel_relaxed(gic->saved_spi_target[i],
dist_base + GIC_DIST_TARGET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
writel_relaxed(GICD_INT_EN_CLR_X32,
dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
- writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
+ writel_relaxed(gic->saved_spi_enable[i],
dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
writel_relaxed(GICD_INT_EN_CLR_X32,
dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
- writel_relaxed(gic_data[gic_nr].saved_spi_active[i],
+ writel_relaxed(gic->saved_spi_active[i],
dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}
writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}
-static void gic_cpu_save(unsigned int gic_nr)
+static void gic_cpu_save(struct gic_chip_data *gic)
{
int i;
u32 *ptr;
void __iomem *dist_base;
void __iomem *cpu_base;
- BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+ if (WARN_ON(!gic))
+ return;
- dist_base = gic_data_dist_base(&gic_data[gic_nr]);
- cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+ dist_base = gic_data_dist_base(gic);
+ cpu_base = gic_data_cpu_base(gic);
if (!dist_base || !cpu_base)
return;
- ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ ptr = raw_cpu_ptr(gic->saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
- ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
+ ptr = raw_cpu_ptr(gic->saved_ppi_active);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
- ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ ptr = raw_cpu_ptr(gic->saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}
-static void gic_cpu_restore(unsigned int gic_nr)
+static void gic_cpu_restore(struct gic_chip_data *gic)
{
int i;
u32 *ptr;
void __iomem *dist_base;
void __iomem *cpu_base;
- BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+ if (WARN_ON(!gic))
+ return;
- dist_base = gic_data_dist_base(&gic_data[gic_nr]);
- cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+ dist_base = gic_data_dist_base(gic);
+ cpu_base = gic_data_cpu_base(gic);
if (!dist_base || !cpu_base)
return;
- ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ ptr = raw_cpu_ptr(gic->saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
writel_relaxed(GICD_INT_EN_CLR_X32,
dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
- ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
+ ptr = raw_cpu_ptr(gic->saved_ppi_active);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
writel_relaxed(GICD_INT_EN_CLR_X32,
dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}
- ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ ptr = raw_cpu_ptr(gic->saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
@@ -681,7 +687,7 @@ static void gic_cpu_restore(unsigned int gic_nr)
dist_base + GIC_DIST_PRI + i * 4);
writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
- gic_cpu_if_up(&gic_data[gic_nr]);
+ gic_cpu_if_up(gic);
}
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
@@ -696,18 +702,18 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
#endif
switch (cmd) {
case CPU_PM_ENTER:
- gic_cpu_save(i);
+ gic_cpu_save(&gic_data[i]);
break;
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
- gic_cpu_restore(i);
+ gic_cpu_restore(&gic_data[i]);
break;
case CPU_CLUSTER_PM_ENTER:
- gic_dist_save(i);
+ gic_dist_save(&gic_data[i]);
break;
case CPU_CLUSTER_PM_ENTER_FAILED:
case CPU_CLUSTER_PM_EXIT:
- gic_dist_restore(i);
+ gic_dist_restore(&gic_data[i]);
break;
}
}
@@ -719,26 +725,39 @@ static struct notifier_block gic_notifier_block = {
.notifier_call = gic_notifier,
};
-static void __init gic_pm_init(struct gic_chip_data *gic)
+static int __init gic_pm_init(struct gic_chip_data *gic)
{
gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
sizeof(u32));
- BUG_ON(!gic->saved_ppi_enable);
+ if (WARN_ON(!gic->saved_ppi_enable))
+ return -ENOMEM;
gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
sizeof(u32));
- BUG_ON(!gic->saved_ppi_active);
+ if (WARN_ON(!gic->saved_ppi_active))
+ goto free_ppi_enable;
gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
sizeof(u32));
- BUG_ON(!gic->saved_ppi_conf);
+ if (WARN_ON(!gic->saved_ppi_conf))
+ goto free_ppi_active;
if (gic == &gic_data[0])
cpu_pm_register_notifier(&gic_notifier_block);
+
+ return 0;
+
+free_ppi_active:
+ free_percpu(gic->saved_ppi_active);
+free_ppi_enable:
+ free_percpu(gic->saved_ppi_enable);
+
+ return -ENOMEM;
}
#else
-static void __init gic_pm_init(struct gic_chip_data *gic)
+static int __init gic_pm_init(struct gic_chip_data *gic)
{
+ return 0;
}
#endif
@@ -1011,63 +1030,63 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
.unmap = gic_irq_domain_unmap,
};
-static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
- void __iomem *dist_base, void __iomem *cpu_base,
- u32 percpu_offset, struct fwnode_handle *handle)
+static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
+ struct fwnode_handle *handle)
{
irq_hw_number_t hwirq_base;
- struct gic_chip_data *gic;
- int gic_irqs, irq_base, i;
-
- BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
-
- gic_check_cpu_features();
+ int gic_irqs, irq_base, i, ret;
- gic = &gic_data[gic_nr];
+ if (WARN_ON(!gic || gic->domain))
+ return -EINVAL;
/* Initialize irq_chip */
- if (static_key_true(&supports_deactivate) && gic_nr == 0) {
- gic->chip = gic_eoimode1_chip;
+ gic->chip = gic_chip;
+
+ if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
+ gic->chip.irq_mask = gic_eoimode1_mask_irq;
+ gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
+ gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
+ gic->chip.name = kasprintf(GFP_KERNEL, "GICv2");
} else {
- gic->chip = gic_chip;
- gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr);
+ gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d",
+ (int)(gic - &gic_data[0]));
}
#ifdef CONFIG_SMP
- if (gic_nr == 0)
+ if (gic == &gic_data[0])
gic->chip.irq_set_affinity = gic_set_affinity;
#endif
-#ifdef CONFIG_GIC_NON_BANKED
- if (percpu_offset) { /* Frankein-GIC without banked registers... */
+ if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
+ /* Frankein-GIC without banked registers... */
unsigned int cpu;
gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
if (WARN_ON(!gic->dist_base.percpu_base ||
!gic->cpu_base.percpu_base)) {
- free_percpu(gic->dist_base.percpu_base);
- free_percpu(gic->cpu_base.percpu_base);
- return;
+ ret = -ENOMEM;
+ goto error;
}
for_each_possible_cpu(cpu) {
u32 mpidr = cpu_logical_map(cpu);
u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- unsigned long offset = percpu_offset * core_id;
- *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
- *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
+ unsigned long offset = gic->percpu_offset * core_id;
+ *per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
+ gic->raw_dist_base + offset;
+ *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
+ gic->raw_cpu_base + offset;
}
gic_set_base_accessor(gic, gic_get_percpu_base);
- } else
-#endif
- { /* Normal, sane GIC... */
- WARN(percpu_offset,
+ } else {
+ /* Normal, sane GIC... */
+ WARN(gic->percpu_offset,
"GIC_NON_BANKED not enabled, ignoring %08x offset!",
- percpu_offset);
- gic->dist_base.common_base = dist_base;
- gic->cpu_base.common_base = cpu_base;
+ gic->percpu_offset);
+ gic->dist_base.common_base = gic->raw_dist_base;
+ gic->cpu_base.common_base = gic->raw_cpu_base;
gic_set_base_accessor(gic, gic_get_common_base);
}
@@ -1090,7 +1109,7 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
* For primary GICs, skip over SGIs.
* For secondary GICs, skip over PPIs, too.
*/
- if (gic_nr == 0 && (irq_start & 31) > 0) {
+ if (gic == &gic_data[0] && (irq_start & 31) > 0) {
hwirq_base = 16;
if (irq_start != -1)
irq_start = (irq_start & ~31) + 16;
@@ -1112,10 +1131,12 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
hwirq_base, &gic_irq_domain_ops, gic);
}
- if (WARN_ON(!gic->domain))
- return;
+ if (WARN_ON(!gic->domain)) {
+ ret = -ENODEV;
+ goto error;
+ }
- if (gic_nr == 0) {
+ if (gic == &gic_data[0]) {
/*
* Initialize the CPU interface map to all CPUs.
* It will be refined as each CPU probes its ID.
@@ -1133,19 +1154,57 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
}
gic_dist_init(gic);
- gic_cpu_init(gic);
- gic_pm_init(gic);
+ ret = gic_cpu_init(gic);
+ if (ret)
+ goto error;
+
+ ret = gic_pm_init(gic);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
+ free_percpu(gic->dist_base.percpu_base);
+ free_percpu(gic->cpu_base.percpu_base);
+ }
+
+ kfree(gic->chip.name);
+
+ return ret;
}
void __init gic_init(unsigned int gic_nr, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base)
{
+ struct gic_chip_data *gic;
+
+ if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR))
+ return;
+
/*
* Non-DT/ACPI systems won't run a hypervisor, so let's not
* bother with these...
*/
static_key_slow_dec(&supports_deactivate);
- __gic_init_bases(gic_nr, irq_start, dist_base, cpu_base, 0, NULL);
+
+ gic = &gic_data[gic_nr];
+ gic->raw_dist_base = dist_base;
+ gic->raw_cpu_base = cpu_base;
+
+ __gic_init_bases(gic, irq_start, NULL);
+}
+
+static void gic_teardown(struct gic_chip_data *gic)
+{
+ if (WARN_ON(!gic))
+ return;
+
+ if (gic->raw_dist_base)
+ iounmap(gic->raw_dist_base);
+ if (gic->raw_cpu_base)
+ iounmap(gic->raw_cpu_base);
}
#ifdef CONFIG_OF
@@ -1189,35 +1248,61 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
return true;
}
+static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
+{
+ if (!gic || !node)
+ return -EINVAL;
+
+ gic->raw_dist_base = of_iomap(node, 0);
+ if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
+ goto error;
+
+ gic->raw_cpu_base = of_iomap(node, 1);
+ if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
+ goto error;
+
+ if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
+ gic->percpu_offset = 0;
+
+ return 0;
+
+error:
+ gic_teardown(gic);
+
+ return -ENOMEM;
+}
+
int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
- void __iomem *cpu_base;
- void __iomem *dist_base;
- u32 percpu_offset;
- int irq;
+ struct gic_chip_data *gic;
+ int irq, ret;
if (WARN_ON(!node))
return -ENODEV;
- dist_base = of_iomap(node, 0);
- WARN(!dist_base, "unable to map gic dist registers\n");
+ if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
+ return -EINVAL;
+
+ gic = &gic_data[gic_cnt];
- cpu_base = of_iomap(node, 1);
- WARN(!cpu_base, "unable to map gic cpu registers\n");
+ ret = gic_of_setup(gic, node);
+ if (ret)
+ return ret;
/*
* Disable split EOI/Deactivate if either HYP is not available
* or the CPU interface is too small.
*/
- if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base))
+ if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
static_key_slow_dec(&supports_deactivate);
- if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
- percpu_offset = 0;
+ ret = __gic_init_bases(gic, -1, &node->fwnode);
+ if (ret) {
+ gic_teardown(gic);
+ return ret;
+ }
- __gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset,
- &node->fwnode);
if (!gic_cnt)
gic_init_physaddr(node);
@@ -1304,9 +1389,9 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
const unsigned long end)
{
struct acpi_madt_generic_distributor *dist;
- void __iomem *cpu_base, *dist_base;
struct fwnode_handle *domain_handle;
- int count;
+ struct gic_chip_data *gic = &gic_data[0];
+ int count, ret;
/* Collect CPU base addresses */
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
@@ -1316,17 +1401,18 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
return -EINVAL;
}
- cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
- if (!cpu_base) {
+ gic->raw_cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
+ if (!gic->raw_cpu_base) {
pr_err("Unable to map GICC registers\n");
return -ENOMEM;
}
dist = (struct acpi_madt_generic_distributor *)header;
- dist_base = ioremap(dist->base_address, ACPI_GICV2_DIST_MEM_SIZE);
- if (!dist_base) {
+ gic->raw_dist_base = ioremap(dist->base_address,
+ ACPI_GICV2_DIST_MEM_SIZE);
+ if (!gic->raw_dist_base) {
pr_err("Unable to map GICD registers\n");
- iounmap(cpu_base);
+ gic_teardown(gic);
return -ENOMEM;
}
@@ -1341,15 +1427,20 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
/*
* Initialize GIC instance zero (no multi-GIC support).
*/
- domain_handle = irq_domain_alloc_fwnode(dist_base);
+ domain_handle = irq_domain_alloc_fwnode(gic->raw_dist_base);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
- iounmap(cpu_base);
- iounmap(dist_base);
+ gic_teardown(gic);
return -ENOMEM;
}
- __gic_init_bases(0, -1, dist_base, cpu_base, 0, domain_handle);
+ ret = __gic_init_bases(gic, -1, domain_handle);
+ if (ret) {
+ pr_err("Failed to initialise GIC\n");
+ irq_domain_free_fwnode(domain_handle);
+ gic_teardown(gic);
+ return ret;
+ }
acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
new file mode 100644
index 000000000000..1034aeb2e98a
--- /dev/null
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <asm/exception.h>
+
+#define LPC32XX_INTC_MASK 0x00
+#define LPC32XX_INTC_RAW 0x04
+#define LPC32XX_INTC_STAT 0x08
+#define LPC32XX_INTC_POL 0x0C
+#define LPC32XX_INTC_TYPE 0x10
+#define LPC32XX_INTC_FIQ 0x14
+
+#define NR_LPC32XX_IC_IRQS 32
+
+struct lpc32xx_irq_chip {
+ void __iomem *base;
+ struct irq_domain *domain;
+ struct irq_chip chip;
+};
+
+static struct lpc32xx_irq_chip *lpc32xx_mic_irqc;
+
+static inline u32 lpc32xx_ic_read(struct lpc32xx_irq_chip *ic, u32 reg)
+{
+ return readl_relaxed(ic->base + reg);
+}
+
+static inline void lpc32xx_ic_write(struct lpc32xx_irq_chip *ic,
+ u32 reg, u32 val)
+{
+ writel_relaxed(val, ic->base + reg);
+}
+
+static void lpc32xx_irq_mask(struct irq_data *d)
+{
+ struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ u32 val, mask = BIT(d->hwirq);
+
+ val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) & ~mask;
+ lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
+}
+
+static void lpc32xx_irq_unmask(struct irq_data *d)
+{
+ struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ u32 val, mask = BIT(d->hwirq);
+
+ val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) | mask;
+ lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
+}
+
+static void lpc32xx_irq_ack(struct irq_data *d)
+{
+ struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq);
+
+ lpc32xx_ic_write(ic, LPC32XX_INTC_RAW, mask);
+}
+
+static int lpc32xx_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ u32 val, mask = BIT(d->hwirq);
+ bool high, edge;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ edge = true;
+ high = true;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ edge = true;
+ high = false;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ edge = false;
+ high = true;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ edge = false;
+ high = false;
+ break;
+ default:
+ pr_info("unsupported irq type %d\n", type);
+ return -EINVAL;
+ }
+
+ irqd_set_trigger_type(d, type);
+
+ val = lpc32xx_ic_read(ic, LPC32XX_INTC_POL);
+ if (high)
+ val |= mask;
+ else
+ val &= ~mask;
+ lpc32xx_ic_write(ic, LPC32XX_INTC_POL, val);
+
+ val = lpc32xx_ic_read(ic, LPC32XX_INTC_TYPE);
+ if (edge) {
+ val |= mask;
+ irq_set_handler_locked(d, handle_edge_irq);
+ } else {
+ val &= ~mask;
+ irq_set_handler_locked(d, handle_level_irq);
+ }
+ lpc32xx_ic_write(ic, LPC32XX_INTC_TYPE, val);
+
+ return 0;
+}
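For quick reference, the combined effect of the two register writes above, read straight off the switch cases (one POL and one TYPE bit per hwirq):

/*
 *   IRQ_TYPE_LEVEL_LOW     POL bit = 0   TYPE bit = 0   handle_level_irq
 *   IRQ_TYPE_LEVEL_HIGH    POL bit = 1   TYPE bit = 0   handle_level_irq
 *   IRQ_TYPE_EDGE_FALLING  POL bit = 0   TYPE bit = 1   handle_edge_irq
 *   IRQ_TYPE_EDGE_RISING   POL bit = 1   TYPE bit = 1   handle_edge_irq
 */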
+
+static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs)
+{
+ struct lpc32xx_irq_chip *ic = lpc32xx_mic_irqc;
+ u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
+
+ while (hwirq) {
+ irq = __ffs(hwirq);
+ hwirq &= ~BIT(irq);
+ handle_domain_irq(lpc32xx_mic_irqc->domain, irq, regs);
+ }
+}
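A short worked pass through the decode loop above, with an invented status value, showing that lower-numbered hwirqs are serviced first:

	u32 hwirq = 0x22, irq;		/* hypothetical LPC32XX_INTC_STAT value */

	while (hwirq) {
		irq = __ffs(hwirq);	/* 1 on the first pass, 5 on the second */
		hwirq &= ~BIT(irq);	/* 0x20 after the first pass, then 0 */
		/* handle_domain_irq(lpc32xx_mic_irqc->domain, irq, regs) runs here */
	}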
+
+static void lpc32xx_sic_handler(struct irq_desc *desc)
+{
+ struct lpc32xx_irq_chip *ic = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
+
+ chained_irq_enter(chip, desc);
+
+ while (hwirq) {
+ irq = __ffs(hwirq);
+ hwirq &= ~BIT(irq);
+ generic_handle_irq(irq_find_mapping(ic->domain, irq));
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int lpc32xx_irq_domain_map(struct irq_domain *id, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct lpc32xx_irq_chip *ic = id->host_data;
+
+ irq_set_chip_data(virq, ic);
+ irq_set_chip_and_handler(virq, &ic->chip, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_noprobe(virq);
+
+ return 0;
+}
+
+static void lpc32xx_irq_domain_unmap(struct irq_domain *id, unsigned int virq)
+{
+ irq_set_chip_and_handler(virq, NULL, NULL);
+}
+
+static const struct irq_domain_ops lpc32xx_irq_domain_ops = {
+ .map = lpc32xx_irq_domain_map,
+ .unmap = lpc32xx_irq_domain_unmap,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int __init lpc32xx_of_ic_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct lpc32xx_irq_chip *irqc;
+ bool is_mic = of_device_is_compatible(node, "nxp,lpc3220-mic");
+ const __be32 *reg = of_get_property(node, "reg", NULL);
+ u32 parent_irq, i, addr = reg ? be32_to_cpu(*reg) : 0;
+
+ irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+ if (!irqc)
+ return -ENOMEM;
+
+ irqc->base = of_iomap(node, 0);
+ if (!irqc->base) {
+ pr_err("%s: unable to map registers\n", node->full_name);
+ kfree(irqc);
+ return -EINVAL;
+ }
+
+ irqc->chip.irq_ack = lpc32xx_irq_ack;
+ irqc->chip.irq_mask = lpc32xx_irq_mask;
+ irqc->chip.irq_unmask = lpc32xx_irq_unmask;
+ irqc->chip.irq_set_type = lpc32xx_irq_set_type;
+ if (is_mic)
+ irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.mic", addr);
+ else
+ irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.sic", addr);
+
+ irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS,
+ &lpc32xx_irq_domain_ops, irqc);
+ if (!irqc->domain) {
+ pr_err("unable to add irq domain\n");
+ iounmap(irqc->base);
+ kfree(irqc->chip.name);
+ kfree(irqc);
+ return -ENODEV;
+ }
+
+ if (is_mic) {
+ lpc32xx_mic_irqc = irqc;
+ set_handle_irq(lpc32xx_handle_irq);
+ } else {
+ for (i = 0; i < of_irq_count(node); i++) {
+ parent_irq = irq_of_parse_and_map(node, i);
+ if (parent_irq)
+ irq_set_chained_handler_and_data(parent_irq,
+ lpc32xx_sic_handler, irqc);
+ }
+ }
+
+ lpc32xx_ic_write(irqc, LPC32XX_INTC_MASK, 0x00);
+ lpc32xx_ic_write(irqc, LPC32XX_INTC_POL, 0x00);
+ lpc32xx_ic_write(irqc, LPC32XX_INTC_TYPE, 0x00);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(nxp_lpc32xx_mic, "nxp,lpc3220-mic", lpc32xx_of_ic_init);
+IRQCHIP_DECLARE(nxp_lpc32xx_sic, "nxp,lpc3220-sic", lpc32xx_of_ic_init);
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
new file mode 100644
index 000000000000..02cca74cab94
--- /dev/null
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -0,0 +1,240 @@
+/*
+ * Freescale SCFG MSI(-X) support
+ *
+ * Copyright (C) 2016 Freescale Semiconductor.
+ *
+ * Author: Minghuan Lian <Minghuan.Lian@nxp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+
+#define MSI_MAX_IRQS 32
+#define MSI_IBS_SHIFT 3
+#define MSIR 4
+
+struct ls_scfg_msi {
+ spinlock_t lock;
+ struct platform_device *pdev;
+ struct irq_domain *parent;
+ struct irq_domain *msi_domain;
+ void __iomem *regs;
+ phys_addr_t msiir_addr;
+ int irq;
+ DECLARE_BITMAP(used, MSI_MAX_IRQS);
+};
+
+static struct irq_chip ls_scfg_msi_irq_chip = {
+ .name = "MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info ls_scfg_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &ls_scfg_msi_irq_chip,
+};
+
+static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
+
+ msg->address_hi = upper_32_bits(msi_data->msiir_addr);
+ msg->address_lo = lower_32_bits(msi_data->msiir_addr);
+ msg->data = data->hwirq << MSI_IBS_SHIFT;
+}
+
+static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip ls_scfg_msi_parent_chip = {
+ .name = "SCFG",
+ .irq_compose_msi_msg = ls_scfg_msi_compose_msg,
+ .irq_set_affinity = ls_scfg_msi_set_affinity,
+};
+
+static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *args)
+{
+ struct ls_scfg_msi *msi_data = domain->host_data;
+ int pos, err = 0;
+
+ WARN_ON(nr_irqs != 1);
+
+ spin_lock(&msi_data->lock);
+ pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS);
+ if (pos < MSI_MAX_IRQS)
+ __set_bit(pos, msi_data->used);
+ else
+ err = -ENOSPC;
+ spin_unlock(&msi_data->lock);
+
+ if (err)
+ return err;
+
+ irq_domain_set_info(domain, virq, pos,
+ &ls_scfg_msi_parent_chip, msi_data,
+ handle_simple_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d);
+ int pos;
+
+ pos = d->hwirq;
+ if (pos < 0 || pos >= MSI_MAX_IRQS) {
+ pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
+ return;
+ }
+
+ spin_lock(&msi_data->lock);
+ __clear_bit(pos, msi_data->used);
+ spin_unlock(&msi_data->lock);
+}
+
+static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
+ .alloc = ls_scfg_msi_domain_irq_alloc,
+ .free = ls_scfg_msi_domain_irq_free,
+};
+
+static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
+{
+ struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc);
+ unsigned long val;
+ int pos, virq;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+ val = ioread32be(msi_data->regs + MSIR);
+ for_each_set_bit(pos, &val, MSI_MAX_IRQS) {
+ virq = irq_find_mapping(msi_data->parent, (31 - pos));
+ if (virq)
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
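ls_scfg_msi_compose_msg() and ls_scfg_msi_irq_handler() above form a round trip; the bit numbering below is inferred from the code rather than documented in the patch: hwirq N is advertised to the endpoint as N << MSI_IBS_SHIFT in the MSI data and is expected to show up in MSIR at bit (31 - N).

/* Hypothetical helper mirroring the handler's irq_find_mapping(parent, 31 - pos). */
static unsigned int example_msir_pos_to_hwirq(unsigned int pos)
{
	return 31 - pos;	/* bit 31 -> hwirq 0, bit 29 -> hwirq 2, ... */
}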
+
+static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
+{
+ /* Initialize MSI domain parent */
+ msi_data->parent = irq_domain_add_linear(NULL,
+ MSI_MAX_IRQS,
+ &ls_scfg_msi_domain_ops,
+ msi_data);
+ if (!msi_data->parent) {
+ dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi_data->msi_domain = pci_msi_create_irq_domain(
+ of_node_to_fwnode(msi_data->pdev->dev.of_node),
+ &ls_scfg_msi_domain_info,
+ msi_data->parent);
+ if (!msi_data->msi_domain) {
+ dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi_data->parent);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ls_scfg_msi_probe(struct platform_device *pdev)
+{
+ struct ls_scfg_msi *msi_data;
+ struct resource *res;
+ int ret;
+
+ msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
+ if (!msi_data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(msi_data->regs)) {
+ dev_err(&pdev->dev, "failed to initialize 'regs'\n");
+ return PTR_ERR(msi_data->regs);
+ }
+ msi_data->msiir_addr = res->start;
+
+ msi_data->irq = platform_get_irq(pdev, 0);
+ if (msi_data->irq <= 0) {
+ dev_err(&pdev->dev, "failed to get MSI irq\n");
+ return -ENODEV;
+ }
+
+ msi_data->pdev = pdev;
+ spin_lock_init(&msi_data->lock);
+
+ ret = ls_scfg_msi_domains_init(msi_data);
+ if (ret)
+ return ret;
+
+ irq_set_chained_handler_and_data(msi_data->irq,
+ ls_scfg_msi_irq_handler,
+ msi_data);
+
+ platform_set_drvdata(pdev, msi_data);
+
+ return 0;
+}
+
+static int ls_scfg_msi_remove(struct platform_device *pdev)
+{
+ struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
+
+ irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL);
+
+ irq_domain_remove(msi_data->msi_domain);
+ irq_domain_remove(msi_data->parent);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id ls_scfg_msi_id[] = {
+ { .compatible = "fsl,1s1021a-msi", },
+ { .compatible = "fsl,1s1043a-msi", },
+ {},
+};
+
+static struct platform_driver ls_scfg_msi_driver = {
+ .driver = {
+ .name = "ls-scfg-msi",
+ .of_match_table = ls_scfg_msi_id,
+ },
+ .probe = ls_scfg_msi_probe,
+ .remove = ls_scfg_msi_remove,
+};
+
+module_platform_driver(ls_scfg_msi_driver);
+
+MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>");
+MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index d67baa231c13..03b79b061d24 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -263,8 +263,8 @@ static int mbigen_device_probe(struct platform_device *pdev)
parent = platform_bus_type.dev_root;
child = of_platform_device_create(np, NULL, parent);
- if (IS_ERR(child))
- return PTR_ERR(child);
+ if (!child)
+ return -ENOMEM;
if (of_property_read_u32(child->dev.of_node, "num-pins",
&num_pins) < 0) {
diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c
new file mode 100644
index 000000000000..ccd72c2cbc23
--- /dev/null
+++ b/drivers/irqchip/irq-partition-percpu.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-partition-percpu.h>
+#include <linux/irqdomain.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+struct partition_desc {
+ int nr_parts;
+ struct partition_affinity *parts;
+ struct irq_domain *domain;
+ struct irq_desc *chained_desc;
+ unsigned long *bitmap;
+ struct irq_domain_ops ops;
+};
+
+static bool partition_check_cpu(struct partition_desc *part,
+ unsigned int cpu, unsigned int hwirq)
+{
+ return cpumask_test_cpu(cpu, &part->parts[hwirq].mask);
+}
+
+static void partition_irq_mask(struct irq_data *d)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+ chip->irq_mask)
+ chip->irq_mask(data);
+}
+
+static void partition_irq_unmask(struct irq_data *d)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+ chip->irq_unmask)
+ chip->irq_unmask(data);
+}
+
+static int partition_irq_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool val)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+ chip->irq_set_irqchip_state)
+ return chip->irq_set_irqchip_state(data, which, val);
+
+ return -EINVAL;
+}
+
+static int partition_irq_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool *val)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+ chip->irq_get_irqchip_state)
+ return chip->irq_get_irqchip_state(data, which, val);
+
+ return -EINVAL;
+}
+
+static int partition_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ if (chip->irq_set_type)
+ return chip->irq_set_type(data, type);
+
+ return -EINVAL;
+}
+
+static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ seq_printf(p, " %5s-%lu", chip->name, data->hwirq);
+}
+
+static struct irq_chip partition_irq_chip = {
+ .irq_mask = partition_irq_mask,
+ .irq_unmask = partition_irq_unmask,
+ .irq_set_type = partition_irq_set_type,
+ .irq_get_irqchip_state = partition_irq_get_irqchip_state,
+ .irq_set_irqchip_state = partition_irq_set_irqchip_state,
+ .irq_print_chip = partition_irq_print_chip,
+};
+
+static void partition_handle_irq(struct irq_desc *desc)
+{
+ struct partition_desc *part = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int cpu = smp_processor_id();
+ int hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ for_each_set_bit(hwirq, part->bitmap, part->nr_parts) {
+ if (partition_check_cpu(part, cpu, hwirq))
+ break;
+ }
+
+ if (unlikely(hwirq == part->nr_parts)) {
+ handle_bad_irq(desc);
+ } else {
+ unsigned int irq;
+ irq = irq_find_mapping(part->domain, hwirq);
+ generic_handle_irq(irq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int partition_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int ret;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ struct irq_fwspec *fwspec = arg;
+ struct partition_desc *part;
+
+ BUG_ON(nr_irqs != 1);
+ ret = domain->ops->translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ part = domain->host_data;
+
+ set_bit(hwirq, part->bitmap);
+ irq_set_chained_handler_and_data(irq_desc_get_irq(part->chained_desc),
+ partition_handle_irq, part);
+ irq_set_percpu_devid_partition(virq, &part->parts[hwirq].mask);
+ irq_domain_set_info(domain, virq, hwirq, &partition_irq_chip, part,
+ handle_percpu_devid_irq, NULL, NULL);
+ irq_set_status_flags(virq, IRQ_NOAUTOEN);
+
+ return 0;
+}
+
+static void partition_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d;
+
+ BUG_ON(nr_irqs != 1);
+
+ d = irq_domain_get_irq_data(domain, virq);
+ irq_set_handler(virq, NULL);
+ irq_domain_reset_irq_data(d);
+}
+
+int partition_translate_id(struct partition_desc *desc, void *partition_id)
+{
+ struct partition_affinity *part = NULL;
+ int i;
+
+ for (i = 0; i < desc->nr_parts; i++) {
+ if (desc->parts[i].partition_id == partition_id) {
+ part = &desc->parts[i];
+ break;
+ }
+ }
+
+ if (WARN_ON(!part)) {
+ pr_err("Failed to find partition\n");
+ return -EINVAL;
+ }
+
+ return i;
+}
+
+struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
+ struct partition_affinity *parts,
+ int nr_parts,
+ int chained_irq,
+ const struct irq_domain_ops *ops)
+{
+ struct partition_desc *desc;
+ struct irq_domain *d;
+
+ BUG_ON(!ops->select || !ops->translate);
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ desc->ops = *ops;
+ desc->ops.free = partition_domain_free;
+ desc->ops.alloc = partition_domain_alloc;
+
+ d = irq_domain_create_linear(fwnode, nr_parts, &desc->ops, desc);
+ if (!d)
+ goto out;
+ desc->domain = d;
+
+ desc->bitmap = kzalloc(sizeof(long) * BITS_TO_LONGS(nr_parts),
+ GFP_KERNEL);
+ if (WARN_ON(!desc->bitmap))
+ goto out;
+
+ desc->chained_desc = irq_to_desc(chained_irq);
+ desc->nr_parts = nr_parts;
+ desc->parts = parts;
+
+ return desc;
+out:
+ if (d)
+ irq_domain_remove(d);
+ kfree(desc);
+
+ return NULL;
+}
+
+struct irq_domain *partition_get_domain(struct partition_desc *dsc)
+{
+ if (dsc)
+ return dsc->domain;
+
+ return NULL;
+}
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index 50be9639e27e..e902f081e16c 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -235,7 +235,7 @@ static int tegra_ictlr_domain_translate(struct irq_domain *d,
return -EINVAL;
*hwirq = fwspec->param[1];
- *type = fwspec->param[2];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
return 0;
}
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 225147863e02..5ae28340a98b 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -413,10 +413,11 @@ config LEDS_INTEL_SS4200
tristate "LED driver for Intel NAS SS4200 series"
depends on LEDS_CLASS
depends on PCI && DMI
+ depends on X86
help
This option enables support for the Intel SS4200 series of
- Network Attached Storage servers. You may control the hard
- drive or power LEDs on the front panel. Using this driver
+ Network Attached Storage servers. You may control the hard
+ drive or power LEDs on the front panel. Using this driver
can stop the front LED from blinking after startup.
config LEDS_LT3593
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 2181581795d3..55fa65e1ae03 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -26,7 +26,7 @@
* Nests outside led_cdev->trigger_lock
*/
static DECLARE_RWSEM(triggers_list_lock);
-static LIST_HEAD(trigger_list);
+LIST_HEAD(trigger_list);
/* Used by LED Class */
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 61143f55597e..8229f063b483 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -127,6 +127,8 @@ static int create_gpio_led(const struct gpio_led *template,
led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ if (template->panic_indicator)
+ led_dat->cdev.flags |= LED_PANIC_INDICATOR;
ret = gpiod_direction_output(led_dat->gpiod, state);
if (ret < 0)
@@ -200,6 +202,8 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
if (fwnode_property_present(child, "retain-state-suspended"))
led.retain_state_suspended = 1;
+ if (fwnode_property_present(child, "panic-indicator"))
+ led.panic_indicator = 1;
ret = create_gpio_led(&led, &priv->leds[priv->num_leds],
dev, NULL);
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 046cb7008745..732eb86bc1a5 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -101,6 +101,19 @@ static struct dmi_system_id nas_led_whitelist[] __initdata = {
DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00")
}
},
+ {
+ /*
+ * FUJITSU SIEMENS SCALEO Home Server/SS4200-E
+ * BIOS V090L 12/19/2007
+ */
+ .callback = ss4200_led_dmi_callback,
+ .ident = "Fujitsu Siemens SCALEO Home Server",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SCALEO Home Server"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00")
+ }
+ },
{}
};
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index c548ea10f0f0..45222a7f4f75 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -327,6 +327,8 @@ static void set_times(struct tca6507_chip *tca, int bank)
int result;
result = choose_times(tca->bank[bank].ontime, &c1, &c2);
+ if (result < 0)
+ return;
dev_dbg(&tca->client->dev,
"Chose on times %d(%d) %d(%d) for %dms\n",
c1, time_codes[c1],
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index db3f20da7221..7d38e6b9a740 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -30,5 +30,6 @@ void led_set_brightness_nosleep(struct led_classdev *led_cdev,
extern struct rw_semaphore leds_list_lock;
extern struct list_head leds_list;
+extern struct list_head trigger_list;
#endif /* __LEDS_H_INCLUDED */
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 5bda6a9b56bb..9893d911390d 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -41,6 +41,14 @@ config LEDS_TRIGGER_IDE_DISK
This allows LEDs to be controlled by IDE disk activity.
If unsure, say Y.
+config LEDS_TRIGGER_MTD
+ bool "LED MTD (NAND/NOR) Trigger"
+ depends on MTD
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by MTD activity.
+ If unsure, say N.
+
config LEDS_TRIGGER_HEARTBEAT
tristate "LED Heartbeat Trigger"
depends on LEDS_TRIGGERS
@@ -108,4 +116,14 @@ config LEDS_TRIGGER_CAMERA
This enables direct flash/torch on/off by the driver, kernel space.
If unsure, say Y.
+config LEDS_TRIGGER_PANIC
+ bool "LED Panic Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be configured to blink on a kernel panic.
+	  Enabling this option lets you mark certain LEDs as panic indicators,
+	  so they blink on a kernel panic even if they are set to a
+	  different trigger.
+ If unsure, say Y.
+
endif # LEDS_TRIGGERS
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
index 1abf48dacf7e..8cc64a4f4e25 100644
--- a/drivers/leds/trigger/Makefile
+++ b/drivers/leds/trigger/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
obj-$(CONFIG_LEDS_TRIGGER_ONESHOT) += ledtrig-oneshot.o
obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o
+obj-$(CONFIG_LEDS_TRIGGER_MTD) += ledtrig-mtd.o
obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
@@ -8,3 +9,4 @@ obj-$(CONFIG_LEDS_TRIGGER_CPU) += ledtrig-cpu.o
obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
obj-$(CONFIG_LEDS_TRIGGER_CAMERA) += ledtrig-camera.o
+obj-$(CONFIG_LEDS_TRIGGER_PANIC) += ledtrig-panic.o
diff --git a/drivers/leds/trigger/ledtrig-ide-disk.c b/drivers/leds/trigger/ledtrig-ide-disk.c
index c02a3ac3cd2b..15123d389240 100644
--- a/drivers/leds/trigger/ledtrig-ide-disk.c
+++ b/drivers/leds/trigger/ledtrig-ide-disk.c
@@ -18,10 +18,11 @@
#define BLINK_DELAY 30
DEFINE_LED_TRIGGER(ledtrig_ide);
-static unsigned long ide_blink_delay = BLINK_DELAY;
void ledtrig_ide_activity(void)
{
+ unsigned long ide_blink_delay = BLINK_DELAY;
+
led_trigger_blink_oneshot(ledtrig_ide,
&ide_blink_delay, &ide_blink_delay, 0);
}
diff --git a/drivers/leds/trigger/ledtrig-mtd.c b/drivers/leds/trigger/ledtrig-mtd.c
new file mode 100644
index 000000000000..99b5b0a4d826
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-mtd.c
@@ -0,0 +1,45 @@
+/*
+ * LED MTD trigger
+ *
+ * Copyright 2016 Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+ *
+ * Based on LED IDE-Disk Activity Trigger
+ *
+ * Copyright 2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+
+#define BLINK_DELAY 30
+
+DEFINE_LED_TRIGGER(ledtrig_mtd);
+DEFINE_LED_TRIGGER(ledtrig_nand);
+
+void ledtrig_mtd_activity(void)
+{
+ unsigned long blink_delay = BLINK_DELAY;
+
+ led_trigger_blink_oneshot(ledtrig_mtd,
+ &blink_delay, &blink_delay, 0);
+ led_trigger_blink_oneshot(ledtrig_nand,
+ &blink_delay, &blink_delay, 0);
+}
+EXPORT_SYMBOL(ledtrig_mtd_activity);
+
+static int __init ledtrig_mtd_init(void)
+{
+ led_trigger_register_simple("mtd", &ledtrig_mtd);
+ led_trigger_register_simple("nand-disk", &ledtrig_nand);
+
+ return 0;
+}
+device_initcall(ledtrig_mtd_init);
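A hedged sketch of how this trigger is meant to be used: the wrapper below is invented, and it assumes ledtrig_mtd_activity() is declared in <linux/leds.h> by the companion header change in this series; MTD I/O paths would call it to fire a one-shot blink on both the "mtd" and "nand-disk" triggers.

#include <linux/leds.h>
#include <linux/mtd/mtd.h>

static int example_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
			    u_char *buf)
{
	size_t retlen;

	ledtrig_mtd_activity();		/* one-shot blink before the I/O */
	return mtd_read(mtd, from, len, &retlen, buf);
}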
diff --git a/drivers/leds/trigger/ledtrig-panic.c b/drivers/leds/trigger/ledtrig-panic.c
new file mode 100644
index 000000000000..d735526b9db4
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-panic.c
@@ -0,0 +1,77 @@
+/*
+ * Kernel Panic LED Trigger
+ *
+ * Copyright 2016 Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/leds.h>
+#include "../leds.h"
+
+static struct led_trigger *trigger;
+
+/*
+ * This is called in a special context by the atomic panic
+ * notifier. This means the trigger can be changed without
+ * worrying about locking.
+ */
+static void led_trigger_set_panic(struct led_classdev *led_cdev)
+{
+ struct led_trigger *trig;
+
+ list_for_each_entry(trig, &trigger_list, next_trig) {
+ if (strcmp("panic", trig->name))
+ continue;
+ if (led_cdev->trigger)
+ list_del(&led_cdev->trig_list);
+ list_add_tail(&led_cdev->trig_list, &trig->led_cdevs);
+
+ /* Avoid the delayed blink path */
+ led_cdev->blink_delay_on = 0;
+ led_cdev->blink_delay_off = 0;
+
+ led_cdev->trigger = trig;
+ if (trig->activate)
+ trig->activate(led_cdev);
+ break;
+ }
+}
+
+static int led_trigger_panic_notifier(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ struct led_classdev *led_cdev;
+
+ list_for_each_entry(led_cdev, &leds_list, node)
+ if (led_cdev->flags & LED_PANIC_INDICATOR)
+ led_trigger_set_panic(led_cdev);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block led_trigger_panic_nb = {
+ .notifier_call = led_trigger_panic_notifier,
+};
+
+static long led_panic_blink(int state)
+{
+ led_trigger_event(trigger, state ? LED_FULL : LED_OFF);
+ return 0;
+}
+
+static int __init ledtrig_panic_init(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &led_trigger_panic_nb);
+
+ led_trigger_register_simple("panic", &trigger);
+ panic_blink = led_panic_blink;
+ return 0;
+}
+device_initcall(ledtrig_panic_init);
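A hedged sketch of the consumer side (driver, LED name and callback are invented): a driver opts an LED into this behaviour by setting the LED_PANIC_INDICATOR flag added by this series before registration, mirroring what the leds-gpio change above does for the "panic-indicator" devicetree property.

#include <linux/leds.h>
#include <linux/platform_device.h>

static void example_led_set(struct led_classdev *cdev, enum led_brightness b)
{
	/* drive the actual hardware here */
}

static struct led_classdev example_led = {
	.name		= "example:red:fault",
	.brightness_set	= example_led_set,
	.flags		= LED_PANIC_INDICATOR,
};

static int example_probe(struct platform_device *pdev)
{
	return devm_led_classdev_register(&pdev->dev, &example_led);
}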
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index adc162c7040d..6e9042e3d2a9 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -603,7 +603,7 @@ void __init lguest_arch_host_init(void)
* doing this.
*/
get_online_cpus();
- if (cpu_has_pge) { /* We have a broader idea of "global". */
+ if (boot_cpu_has(X86_FEATURE_PGE)) { /* We have a broader idea of "global". */
/* Remember that this was originally set (for cleanup). */
cpu_had_pge = 1;
/*
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 0dc9a80adb94..160c1a6838e1 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -30,23 +30,35 @@
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>
-static LIST_HEAD(nvm_targets);
+static LIST_HEAD(nvm_tgt_types);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
+static LIST_HEAD(nvm_targets);
static DECLARE_RWSEM(nvm_lock);
+static struct nvm_target *nvm_find_target(const char *name)
+{
+ struct nvm_target *tgt;
+
+ list_for_each_entry(tgt, &nvm_targets, list)
+ if (!strcmp(name, tgt->disk->disk_name))
+ return tgt;
+
+ return NULL;
+}
+
static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
struct nvm_tgt_type *tt;
- list_for_each_entry(tt, &nvm_targets, list)
+ list_for_each_entry(tt, &nvm_tgt_types, list)
if (!strcmp(name, tt->name))
return tt;
return NULL;
}
-int nvm_register_target(struct nvm_tgt_type *tt)
+int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
int ret = 0;
@@ -54,14 +66,14 @@ int nvm_register_target(struct nvm_tgt_type *tt)
if (nvm_find_target_type(tt->name))
ret = -EEXIST;
else
- list_add(&tt->list, &nvm_targets);
+ list_add(&tt->list, &nvm_tgt_types);
up_write(&nvm_lock);
return ret;
}
-EXPORT_SYMBOL(nvm_register_target);
+EXPORT_SYMBOL(nvm_register_tgt_type);
-void nvm_unregister_target(struct nvm_tgt_type *tt)
+void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
if (!tt)
return;
@@ -70,20 +82,20 @@ void nvm_unregister_target(struct nvm_tgt_type *tt)
list_del(&tt->list);
up_write(&nvm_lock);
}
-EXPORT_SYMBOL(nvm_unregister_target);
+EXPORT_SYMBOL(nvm_unregister_tgt_type);
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
dma_addr_t *dma_handler)
{
- return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
+ return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);
-void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list,
+void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
dma_addr_t dma_handler)
{
- dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler);
+ dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);
@@ -214,8 +226,8 @@ void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
int i;
- if (rqd->nr_pages > 1) {
- for (i = 0; i < rqd->nr_pages; i++)
+ if (rqd->nr_ppas > 1) {
+ for (i = 0; i < rqd->nr_ppas; i++)
rqd->ppa_list[i] = dev_to_generic_addr(dev,
rqd->ppa_list[i]);
} else {
@@ -228,8 +240,8 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
int i;
- if (rqd->nr_pages > 1) {
- for (i = 0; i < rqd->nr_pages; i++)
+ if (rqd->nr_ppas > 1) {
+ for (i = 0; i < rqd->nr_ppas; i++)
rqd->ppa_list[i] = generic_to_dev_addr(dev,
rqd->ppa_list[i]);
} else {
@@ -239,33 +251,36 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
EXPORT_SYMBOL(nvm_generic_to_addr_mode);
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
- struct ppa_addr *ppas, int nr_ppas)
+ struct ppa_addr *ppas, int nr_ppas, int vblk)
{
int i, plane_cnt, pl_idx;
- if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
- rqd->nr_pages = 1;
+ if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
+ rqd->nr_ppas = nr_ppas;
rqd->ppa_addr = ppas[0];
return 0;
}
- plane_cnt = dev->plane_mode;
- rqd->nr_pages = plane_cnt * nr_ppas;
-
- if (dev->ops->max_phys_sect < rqd->nr_pages)
- return -EINVAL;
-
+ rqd->nr_ppas = nr_ppas;
rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
if (!rqd->ppa_list) {
pr_err("nvm: failed to allocate dma memory\n");
return -ENOMEM;
}
- for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+ if (!vblk) {
+ for (i = 0; i < nr_ppas; i++)
+ rqd->ppa_list[i] = ppas[i];
+ } else {
+ plane_cnt = dev->plane_mode;
+ rqd->nr_ppas *= plane_cnt;
+
for (i = 0; i < nr_ppas; i++) {
- ppas[i].g.pl = pl_idx;
- rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+ for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+ ppas[i].g.pl = pl_idx;
+ rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+ }
}
}
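A worked example of the unfold in the vblk branch above, with invented values: nr_ppas = 2 and plane_cnt = 2 give rqd->nr_ppas = 4, and the index (pl_idx * nr_ppas) + i groups the list by plane:

/*
 *   ppa_list[0] = ppas[0], pl 0        ppa_list[2] = ppas[0], pl 1
 *   ppa_list[1] = ppas[1], pl 0        ppa_list[3] = ppas[1], pl 1
 */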
@@ -292,7 +307,7 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
memset(&rqd, 0, sizeof(struct nvm_rq));
- ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas);
+ ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
if (ret)
return ret;
@@ -322,11 +337,10 @@ static void nvm_end_io_sync(struct nvm_rq *rqd)
complete(waiting);
}
-int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
- int opcode, int flags, void *buf, int len)
+int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
+ int flags, void *buf, int len)
{
DECLARE_COMPLETION_ONSTACK(wait);
- struct nvm_rq rqd;
struct bio *bio;
int ret;
unsigned long hang_check;
@@ -335,23 +349,21 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
if (IS_ERR_OR_NULL(bio))
return -ENOMEM;
- memset(&rqd, 0, sizeof(struct nvm_rq));
- ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
+ nvm_generic_to_addr_mode(dev, rqd);
+
+ rqd->dev = dev;
+ rqd->opcode = opcode;
+ rqd->flags = flags;
+ rqd->bio = bio;
+ rqd->wait = &wait;
+ rqd->end_io = nvm_end_io_sync;
+
+ ret = dev->ops->submit_io(dev, rqd);
if (ret) {
bio_put(bio);
return ret;
}
- rqd.opcode = opcode;
- rqd.bio = bio;
- rqd.wait = &wait;
- rqd.dev = dev;
- rqd.end_io = nvm_end_io_sync;
- rqd.flags = flags;
- nvm_generic_to_addr_mode(dev, &rqd);
-
- ret = dev->ops->submit_io(dev, &rqd);
-
/* Prevent hang_check timer from firing at us during very long I/O */
hang_check = sysctl_hung_task_timeout_secs;
if (hang_check)
@@ -359,12 +371,113 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
else
wait_for_completion_io(&wait);
+ return rqd->error;
+}
+
+/**
+ * nvm_submit_ppa_list - submit user-defined ppa list to device. The user must
+ * take care to free the ppa list if necessary.
+ * @dev: device
+ * @ppa_list: user created ppa_list
+ * @nr_ppas: length of ppa_list
+ * @opcode: device opcode
+ * @flags: device flags
+ * @buf: data buffer
+ * @len: data buffer length
+ */
+int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
+ int nr_ppas, int opcode, int flags, void *buf, int len)
+{
+ struct nvm_rq rqd;
+
+ if (dev->ops->max_phys_sect < nr_ppas)
+ return -EINVAL;
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ rqd.nr_ppas = nr_ppas;
+ if (nr_ppas > 1)
+ rqd.ppa_list = ppa_list;
+ else
+ rqd.ppa_addr = ppa_list[0];
+
+ return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
+}
+EXPORT_SYMBOL(nvm_submit_ppa_list);
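
A minimal caller sketch for the new export; the device pointer, opcode, flags, block/page numbers and buffer size below are illustrative assumptions rather than values taken from this patch:

	/* Sketch only: read the first two pages of block 10 on ch 0 / lun 0. */
	struct ppa_addr ppas[2];
	void *buf;
	int ret;

	memset(ppas, 0, sizeof(ppas));
	ppas[0].g.ch = 0;
	ppas[0].g.lun = 0;
	ppas[0].g.blk = 10;
	ppas[0].g.pg = 0;
	ppas[1] = ppas[0];
	ppas[1].g.pg = 1;

	buf = kmalloc(2 * dev->fpg_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = nvm_submit_ppa_list(dev, ppas, 2, NVM_OP_PREAD, 0, buf,
				  2 * dev->fpg_size);

	/* the caller retains ownership of both ppas[] and buf */
	kfree(buf);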
+
+/**
+ * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
+ * into single-, dual- or quad-plane PPAs depending on the device type.
+ * @dev: device
+ * @ppa: user created ppa_list
+ * @nr_ppas: length of ppa_list
+ * @opcode: device opcode
+ * @flags: device flags
+ * @buf: data buffer
+ * @len: data buffer length
+ */
+int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
+ int opcode, int flags, void *buf, int len)
+{
+ struct nvm_rq rqd;
+ int ret;
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+ ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
+ if (ret)
+ return ret;
+
+ ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
+
nvm_free_rqd_ppalist(dev, &rqd);
- return rqd.error;
+ return ret;
}
EXPORT_SYMBOL(nvm_submit_ppa);
+/*
+ * Folds a bad block list from its plane representation to its virtual
+ * block representation. The fold is done in place and the reduced size
+ * is returned.
+ *
+ * If any of the plane states is bad or grown bad, the virtual block is
+ * marked bad. Otherwise, the first plane state acts as the block state.
+ */
+int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
+{
+ int blk, offset, pl, blktype;
+
+ if (nr_blks != dev->blks_per_lun * dev->plane_mode)
+ return -EINVAL;
+
+ for (blk = 0; blk < dev->blks_per_lun; blk++) {
+ offset = blk * dev->plane_mode;
+ blktype = blks[offset];
+
+ /* Bad blocks in any plane take precedence over other types */
+ for (pl = 0; pl < dev->plane_mode; pl++) {
+ if (blks[offset + pl] &
+ (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
+ blktype = blks[offset + pl];
+ break;
+ }
+ }
+
+ blks[blk] = blktype;
+ }
+
+ return dev->blks_per_lun;
+}
+EXPORT_SYMBOL(nvm_bb_tbl_fold);
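
To make the fold concrete, a hypothetical dual-plane lun with three blocks (assuming the usual NVM_BLK_T_* encoding, in which NVM_BLK_T_HOST does not carry the bad bits):

/*
 * Input,  per plane pair:  blk0 {FREE, BAD}  blk1 {FREE, FREE}  blk2 {HOST, FREE}
 * Output, folded in place: blks[] = {BAD, FREE, HOST}, return = dev->blks_per_lun = 3
 *
 * blk0 folds to BAD because one of its planes is bad; blk1 and blk2 simply
 * take the state of their first plane.
 */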
+
+int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
+{
+ ppa = generic_to_dev_addr(dev, ppa);
+
+ return dev->ops->get_bb_tbl(dev, ppa, blks);
+}
+EXPORT_SYMBOL(nvm_get_bb_tbl);
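
The intended caller pattern, which the gennvm and sysblk changes below follow, is to fetch the raw per-plane table and then fold it. Roughly (sketch only, error handling trimmed):

	int nr_blks = dev->blks_per_lun * dev->plane_mode;
	u8 *blks = kmalloc(nr_blks, GFP_KERNEL);

	if (!blks)
		return -ENOMEM;

	ret = nvm_get_bb_tbl(dev, ppa, blks);	/* ppa: generic ch/lun address */
	if (!ret)
		ret = nvm_bb_tbl_fold(dev, blks, nr_blks); /* folded count or -EINVAL */
	kfree(blks);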
+
static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
int i;
@@ -414,6 +527,7 @@ static int nvm_core_init(struct nvm_dev *dev)
{
struct nvm_id *id = &dev->identity;
struct nvm_id_group *grp = &id->groups[0];
+ int ret;
/* device values */
dev->nr_chnls = grp->num_ch;
@@ -421,6 +535,8 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->pgs_per_blk = grp->num_pg;
dev->blks_per_lun = grp->num_blk;
dev->nr_planes = grp->num_pln;
+ dev->fpg_size = grp->fpg_sz;
+ dev->pfpg_size = grp->fpg_sz * grp->num_pln;
dev->sec_size = grp->csecs;
dev->oob_size = grp->sos;
dev->sec_per_pg = grp->fpg_sz / grp->csecs;
@@ -430,33 +546,16 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->plane_mode = NVM_PLANE_SINGLE;
dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
- if (grp->mtype != 0) {
- pr_err("nvm: memory type not supported\n");
- return -EINVAL;
- }
-
- switch (grp->fmtype) {
- case NVM_ID_FMTYPE_SLC:
- if (nvm_init_slc_tbl(dev, grp))
- return -ENOMEM;
- break;
- case NVM_ID_FMTYPE_MLC:
- if (nvm_init_mlc_tbl(dev, grp))
- return -ENOMEM;
- break;
- default:
- pr_err("nvm: flash type not supported\n");
- return -EINVAL;
- }
-
- if (!dev->lps_per_blk)
- pr_info("nvm: lower page programming table missing\n");
-
if (grp->mpos & 0x020202)
dev->plane_mode = NVM_PLANE_DOUBLE;
if (grp->mpos & 0x040404)
dev->plane_mode = NVM_PLANE_QUAD;
+ if (grp->mtype != 0) {
+ pr_err("nvm: memory type not supported\n");
+ return -EINVAL;
+ }
+
/* calculated values */
dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
@@ -468,11 +567,73 @@ static int nvm_core_init(struct nvm_dev *dev)
sizeof(unsigned long), GFP_KERNEL);
if (!dev->lun_map)
return -ENOMEM;
- INIT_LIST_HEAD(&dev->online_targets);
+
+ switch (grp->fmtype) {
+ case NVM_ID_FMTYPE_SLC:
+ if (nvm_init_slc_tbl(dev, grp)) {
+ ret = -ENOMEM;
+ goto err_fmtype;
+ }
+ break;
+ case NVM_ID_FMTYPE_MLC:
+ if (nvm_init_mlc_tbl(dev, grp)) {
+ ret = -ENOMEM;
+ goto err_fmtype;
+ }
+ break;
+ default:
+ pr_err("nvm: flash type not supported\n");
+ ret = -EINVAL;
+ goto err_fmtype;
+ }
+
mutex_init(&dev->mlock);
spin_lock_init(&dev->lock);
return 0;
+err_fmtype:
+ kfree(dev->lun_map);
+ return ret;
+}
+
+static void nvm_remove_target(struct nvm_target *t)
+{
+ struct nvm_tgt_type *tt = t->type;
+ struct gendisk *tdisk = t->disk;
+ struct request_queue *q = tdisk->queue;
+
+ lockdep_assert_held(&nvm_lock);
+
+ del_gendisk(tdisk);
+ blk_cleanup_queue(q);
+
+ if (tt->exit)
+ tt->exit(tdisk->private_data);
+
+ put_disk(tdisk);
+
+ list_del(&t->list);
+ kfree(t);
+}
+
+static void nvm_free_mgr(struct nvm_dev *dev)
+{
+ struct nvm_target *tgt, *tmp;
+
+ if (!dev->mt)
+ return;
+
+ down_write(&nvm_lock);
+ list_for_each_entry_safe(tgt, tmp, &nvm_targets, list) {
+ if (tgt->dev != dev)
+ continue;
+
+ nvm_remove_target(tgt);
+ }
+ up_write(&nvm_lock);
+
+ dev->mt->unregister_mgr(dev);
+ dev->mt = NULL;
}
static void nvm_free(struct nvm_dev *dev)
@@ -480,10 +641,10 @@ static void nvm_free(struct nvm_dev *dev)
if (!dev)
return;
- if (dev->mt)
- dev->mt->unregister_mgr(dev);
+ nvm_free_mgr(dev);
kfree(dev->lptbl);
+ kfree(dev->lun_map);
}
static int nvm_init(struct nvm_dev *dev)
@@ -530,8 +691,8 @@ err:
static void nvm_exit(struct nvm_dev *dev)
{
- if (dev->ppalist_pool)
- dev->ops->destroy_dma_pool(dev->ppalist_pool);
+ if (dev->dma_pool)
+ dev->ops->destroy_dma_pool(dev->dma_pool);
nvm_free(dev);
pr_info("nvm: successfully unloaded\n");
@@ -565,9 +726,9 @@ int nvm_register(struct request_queue *q, char *disk_name,
}
if (dev->ops->max_phys_sect > 1) {
- dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
- if (!dev->ppalist_pool) {
- pr_err("nvm: could not create ppa pool\n");
+ dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
+ if (!dev->dma_pool) {
+ pr_err("nvm: could not create dma pool\n");
ret = -ENOMEM;
goto err_init;
}
@@ -613,7 +774,6 @@ void nvm_unregister(char *disk_name)
up_write(&nvm_lock);
nvm_exit(dev);
- kfree(dev->lun_map);
kfree(dev);
}
EXPORT_SYMBOL(nvm_unregister);
@@ -645,12 +805,11 @@ static int nvm_create_target(struct nvm_dev *dev,
return -EINVAL;
}
- list_for_each_entry(t, &dev->online_targets, list) {
- if (!strcmp(create->tgtname, t->disk->disk_name)) {
- pr_err("nvm: target name already exists.\n");
- up_write(&nvm_lock);
- return -EINVAL;
- }
+ t = nvm_find_target(create->tgtname);
+ if (t) {
+ pr_err("nvm: target name already exists.\n");
+ up_write(&nvm_lock);
+ return -EINVAL;
}
up_write(&nvm_lock);
@@ -688,9 +847,10 @@ static int nvm_create_target(struct nvm_dev *dev,
t->type = tt;
t->disk = tdisk;
+ t->dev = dev;
down_write(&nvm_lock);
- list_add_tail(&t->list, &dev->online_targets);
+ list_add_tail(&t->list, &nvm_targets);
up_write(&nvm_lock);
return 0;
@@ -703,26 +863,6 @@ err_t:
return -ENOMEM;
}
-static void nvm_remove_target(struct nvm_target *t)
-{
- struct nvm_tgt_type *tt = t->type;
- struct gendisk *tdisk = t->disk;
- struct request_queue *q = tdisk->queue;
-
- lockdep_assert_held(&nvm_lock);
-
- del_gendisk(tdisk);
- blk_cleanup_queue(q);
-
- if (tt->exit)
- tt->exit(tdisk->private_data);
-
- put_disk(tdisk);
-
- list_del(&t->list);
- kfree(t);
-}
-
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
struct nvm_dev *dev;
@@ -753,26 +893,19 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
{
- struct nvm_target *t = NULL;
- struct nvm_dev *dev;
- int ret = -1;
+ struct nvm_target *t;
down_write(&nvm_lock);
- list_for_each_entry(dev, &nvm_devices, devices)
- list_for_each_entry(t, &dev->online_targets, list) {
- if (!strcmp(remove->tgtname, t->disk->disk_name)) {
- nvm_remove_target(t);
- ret = 0;
- break;
- }
- }
- up_write(&nvm_lock);
-
- if (ret) {
+ t = nvm_find_target(remove->tgtname);
+ if (!t) {
pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
+ up_write(&nvm_lock);
return -EINVAL;
}
+ nvm_remove_target(t);
+ up_write(&nvm_lock);
+
return 0;
}
@@ -921,7 +1054,7 @@ static long nvm_ioctl_info(struct file *file, void __user *arg)
info->version[2] = NVM_VERSION_PATCH;
down_write(&nvm_lock);
- list_for_each_entry(tt, &nvm_targets, list) {
+ list_for_each_entry(tt, &nvm_tgt_types, list) {
struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
tgt->version[0] = tt->version[0];
@@ -1118,10 +1251,7 @@ static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
return -EINVAL;
}
- if (dev->mt) {
- dev->mt->unregister_mgr(dev);
- dev->mt = NULL;
- }
+ nvm_free_mgr(dev);
if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
return nvm_dev_factory(dev, fact.flags);
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 72e124a3927d..ec9fb6876e38 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -129,27 +129,25 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
return 0;
}
-static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
- void *private)
+static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa,
+ u8 *blks, int nr_blks)
{
- struct gen_nvm *gn = private;
struct nvm_dev *dev = gn->dev;
struct gen_lun *lun;
struct nvm_block *blk;
int i;
+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
- for (i = 0; i < nr_blocks; i++) {
+ for (i = 0; i < nr_blks; i++) {
if (blks[i] == 0)
continue;
blk = &lun->vlun.blocks[i];
- if (!blk) {
- pr_err("gennvm: BB data is out of bounds.\n");
- return -EINVAL;
- }
-
list_move_tail(&blk->list, &lun->bb_list);
lun->vlun.nr_bad_blocks++;
lun->vlun.nr_free_blocks--;
@@ -216,13 +214,21 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
struct gen_lun *lun;
struct nvm_block *block;
sector_t lun_iter, blk_iter, cur_block_id = 0;
- int ret;
+ int ret, nr_blks;
+ u8 *blks;
+
+ nr_blks = dev->blks_per_lun * dev->plane_mode;
+ blks = kmalloc(nr_blks, GFP_KERNEL);
+ if (!blks)
+ return -ENOMEM;
gennvm_for_each_lun(gn, lun, lun_iter) {
lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
dev->blks_per_lun);
- if (!lun->vlun.blocks)
+ if (!lun->vlun.blocks) {
+ kfree(blks);
return -ENOMEM;
+ }
for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
block = &lun->vlun.blocks[blk_iter];
@@ -246,14 +252,15 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
ppa.ppa = 0;
ppa.g.ch = lun->vlun.chnl_id;
- ppa.g.lun = lun->vlun.id;
- ppa = generic_to_dev_addr(dev, ppa);
+ ppa.g.lun = lun->vlun.lun_id;
+
+ ret = nvm_get_bb_tbl(dev, ppa, blks);
+ if (ret)
+ pr_err("gennvm: could not get BB table\n");
- ret = dev->ops->get_bb_tbl(dev, ppa,
- dev->blks_per_lun,
- gennvm_block_bb, gn);
+ ret = gennvm_block_bb(gn, ppa, blks, nr_blks);
if (ret)
- pr_err("gennvm: could not read BB table\n");
+ pr_err("gennvm: BB table map failed\n");
}
}
@@ -266,6 +273,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
}
}
+ kfree(blks);
return 0;
}
@@ -399,64 +407,60 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
spin_unlock(&vlun->lock);
}
-static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
- int type)
+static void gennvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
{
struct gen_nvm *gn = dev->mp;
struct gen_lun *lun;
struct nvm_block *blk;
- if (unlikely(ppa->g.ch > dev->nr_chnls ||
- ppa->g.lun > dev->luns_per_chnl ||
- ppa->g.blk > dev->blks_per_lun)) {
+ pr_debug("gennvm: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
+ ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type);
+
+ if (unlikely(ppa.g.ch > dev->nr_chnls ||
+ ppa.g.lun > dev->luns_per_chnl ||
+ ppa.g.blk > dev->blks_per_lun)) {
WARN_ON_ONCE(1);
pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
- ppa->g.ch, dev->nr_chnls,
- ppa->g.lun, dev->luns_per_chnl,
- ppa->g.blk, dev->blks_per_lun);
+ ppa.g.ch, dev->nr_chnls,
+ ppa.g.lun, dev->luns_per_chnl,
+ ppa.g.blk, dev->blks_per_lun);
return;
}
- lun = &gn->luns[ppa->g.lun * ppa->g.ch];
- blk = &lun->vlun.blocks[ppa->g.blk];
+ lun = &gn->luns[ppa.g.lun * ppa.g.ch];
+ blk = &lun->vlun.blocks[ppa.g.blk];
/* will be moved to bb list on put_blk from target */
blk->state = type;
}
-/* mark block bad. It is expected the target recover from the error. */
+/*
+ * mark block bad in gennvm. It is expected that the target recovers separately
+ */
static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
- int i;
-
- if (!dev->ops->set_bb_tbl)
- return;
-
- if (dev->ops->set_bb_tbl(dev, rqd, 1))
- return;
+ int bit = -1;
+ int max_secs = dev->ops->max_phys_sect;
+ void *comp_bits = &rqd->ppa_status;
nvm_addr_to_generic_mode(dev, rqd);
/* look up blocks and mark them as bad */
- if (rqd->nr_pages > 1)
- for (i = 0; i < rqd->nr_pages; i++)
- gennvm_blk_set_type(dev, &rqd->ppa_list[i],
- NVM_BLK_ST_BAD);
- else
- gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD);
+ if (rqd->nr_ppas == 1) {
+ gennvm_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
+ return;
+ }
+
+ while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
+ gennvm_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
}
static void gennvm_end_io(struct nvm_rq *rqd)
{
struct nvm_tgt_instance *ins = rqd->ins;
- switch (rqd->error) {
- case NVM_RSP_SUCCESS:
- case NVM_RSP_ERR_EMPTYPAGE:
- break;
- case NVM_RSP_ERR_FAILWRITE:
+ if (rqd->error == NVM_RSP_ERR_FAILWRITE)
gennvm_mark_blk_bad(rqd->dev, rqd);
- }
ins->tt->end_io(rqd);
}
@@ -539,6 +543,8 @@ static struct nvmm_type gennvm = {
.submit_io = gennvm_submit_io,
.erase_blk = gennvm_erase_blk,
+ .mark_blk = gennvm_mark_blk,
+
.get_lun = gennvm_get_lun,
.reserve_lun = gennvm_reserve_lun,
.release_lun = gennvm_release_lun,
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 3ab6495c3fd8..2103e97a974f 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -405,9 +405,8 @@ static void rrpc_block_gc(struct work_struct *work)
ws_gc);
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
+ struct rrpc_lun *rlun = rblk->rlun;
struct nvm_dev *dev = rrpc->dev;
- struct nvm_lun *lun = rblk->parent->lun;
- struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
mempool_free(gcb, rrpc->gcb_pool);
pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
@@ -508,9 +507,9 @@ static void rrpc_gc_queue(struct work_struct *work)
ws_gc);
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
+ struct rrpc_lun *rlun = rblk->rlun;
struct nvm_lun *lun = rblk->parent->lun;
struct nvm_block *blk = rblk->parent;
- struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
spin_lock(&rlun->lock);
list_add_tail(&rblk->prio, &rlun->prio_list);
@@ -696,7 +695,7 @@ static void rrpc_end_io(struct nvm_rq *rqd)
{
struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
- uint8_t npages = rqd->nr_pages;
+ uint8_t npages = rqd->nr_ppas;
sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
if (bio_data_dir(rqd->bio) == WRITE)
@@ -711,8 +710,6 @@ static void rrpc_end_io(struct nvm_rq *rqd)
if (npages > 1)
nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
- if (rqd->metadata)
- nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);
mempool_free(rqd, rrpc->rq_pool);
}
@@ -886,7 +883,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
bio_get(bio);
rqd->bio = bio;
rqd->ins = &rrpc->instance;
- rqd->nr_pages = nr_pages;
+ rqd->nr_ppas = nr_pages;
rrq->flags = flags;
err = nvm_submit_io(rrpc->dev, rqd);
@@ -895,7 +892,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
bio_put(bio);
if (!(flags & NVM_IOTYPE_GC)) {
rrpc_unlock_rq(rrpc, rqd);
- if (rqd->nr_pages > 1)
+ if (rqd->nr_ppas > 1)
nvm_dev_dma_free(rrpc->dev,
rqd->ppa_list, rqd->dma_ppa_list);
}
@@ -1039,11 +1036,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
{
struct nvm_dev *dev = rrpc->dev;
sector_t i;
- u64 slba;
int ret;
- slba = rrpc->soffset >> (ilog2(dev->sec_size) - 9);
-
rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
if (!rrpc->trans_map)
return -ENOMEM;
@@ -1065,8 +1059,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
return 0;
/* Bring up the mapping table from device */
- ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_sects, rrpc_l2p_update,
- rrpc);
+ ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
+ rrpc_l2p_update, rrpc);
if (ret) {
pr_err("nvm: rrpc: could not read L2P table.\n");
return -EINVAL;
@@ -1207,10 +1201,6 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
spin_lock_init(&rlun->lock);
-
- rrpc->total_blocks += dev->blks_per_lun;
- rrpc->nr_sects += dev->sec_per_lun;
-
}
return 0;
@@ -1224,18 +1214,24 @@ static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
struct nvm_dev *dev = rrpc->dev;
struct nvmm_type *mt = dev->mt;
sector_t size = rrpc->nr_sects * dev->sec_size;
+ int ret;
size >>= 9;
- return mt->get_area(dev, begin, size);
+ ret = mt->get_area(dev, begin, size);
+ if (!ret)
+ *begin >>= (ilog2(dev->sec_size) - 9);
+
+ return ret;
}
static void rrpc_area_free(struct rrpc *rrpc)
{
struct nvm_dev *dev = rrpc->dev;
struct nvmm_type *mt = dev->mt;
+ sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);
- mt->put_area(dev, rrpc->soffset);
+ mt->put_area(dev, begin);
}
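
A quick check of the sector-size shift used by rrpc_area_init() and rrpc_area_free() above, assuming a hypothetical dev->sec_size of 4096:

/*
 * ilog2(4096) - 9 = 3.  A get_area() result of 2048 (512-byte sectors, 1 MiB)
 * is stored as rrpc->soffset = 2048 >> 3 = 256 device sectors, and
 * rrpc_area_free() shifts it back to 256 << 3 = 2048 before put_area().
 */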
static void rrpc_free(struct rrpc *rrpc)
@@ -1268,7 +1264,7 @@ static sector_t rrpc_capacity(void *private)
sector_t reserved, provisioned;
/* cur, gc, and two emergency blocks for each lun */
- reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
+ reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
provisioned = rrpc->nr_sects - reserved;
if (reserved > rrpc->nr_sects) {
@@ -1388,6 +1384,8 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
rrpc->nr_luns = lun_end - lun_begin + 1;
+ rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
+ rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;
/* simple round-robin strategy */
atomic_set(&rrpc->next_lun, -1);
@@ -1468,12 +1466,12 @@ static struct nvm_tgt_type tt_rrpc = {
static int __init rrpc_module_init(void)
{
- return nvm_register_target(&tt_rrpc);
+ return nvm_register_tgt_type(&tt_rrpc);
}
static void rrpc_module_exit(void)
{
- nvm_unregister_target(&tt_rrpc);
+ nvm_unregister_tgt_type(&tt_rrpc);
}
module_init(rrpc_module_init);
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 2653484a3b40..87e84b5fc1cc 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -251,7 +251,7 @@ static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- uint8_t pages = rqd->nr_pages;
+ uint8_t pages = rqd->nr_ppas;
BUG_ON((r->l_start + pages) > rrpc->nr_sects);
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index 321de1f154c5..994697ac786e 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -93,12 +93,51 @@ void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
}
-static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
- void *private)
+static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks,
+ struct sysblk_scan *s)
+{
+ struct ppa_addr *sppa;
+ int i, blkid = 0;
+
+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
+ for (i = 0; i < nr_blks; i++) {
+ if (blks[i] == NVM_BLK_T_HOST)
+ return -EEXIST;
+
+ if (blks[i] != NVM_BLK_T_FREE)
+ continue;
+
+ sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
+ sppa->g.ch = ppa.g.ch;
+ sppa->g.lun = ppa.g.lun;
+ sppa->g.blk = i;
+ s->nr_ppas++;
+ blkid++;
+
+ pr_debug("nvm: use (%u %u %u) as sysblk\n",
+ sppa->g.ch, sppa->g.lun, sppa->g.blk);
+ if (blkid > MAX_BLKS_PR_SYSBLK - 1)
+ return 0;
+ }
+
+ pr_err("nvm: sysblk failed get sysblk\n");
+ return -EINVAL;
+}
+
+static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks,
+ struct sysblk_scan *s)
{
- struct sysblk_scan *s = private;
int i, nr_sysblk = 0;
+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
for (i = 0; i < nr_blks; i++) {
if (blks[i] != NVM_BLK_T_HOST)
continue;
@@ -119,26 +158,42 @@ static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
}
static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
- struct ppa_addr *ppas, nvm_bb_update_fn *fn)
+ struct ppa_addr *ppas, int get_free)
{
- struct ppa_addr dppa;
- int i, ret;
+ int i, nr_blks, ret = 0;
+ u8 *blks;
s->nr_ppas = 0;
+ nr_blks = dev->blks_per_lun * dev->plane_mode;
+
+ blks = kmalloc(nr_blks, GFP_KERNEL);
+ if (!blks)
+ return -ENOMEM;
for (i = 0; i < s->nr_rows; i++) {
- dppa = generic_to_dev_addr(dev, ppas[i]);
s->row = i;
- ret = dev->ops->get_bb_tbl(dev, dppa, dev->blks_per_lun, fn, s);
+ ret = nvm_get_bb_tbl(dev, ppas[i], blks);
if (ret) {
pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
ppas[i].g.ch,
ppas[i].g.blk);
- return ret;
+ goto err_get;
}
+
+ if (get_free)
+ ret = sysblk_get_free_blks(dev, ppas[i], blks, nr_blks,
+ s);
+ else
+ ret = sysblk_get_host_blks(dev, ppas[i], blks, nr_blks,
+ s);
+
+ if (ret)
+ goto err_get;
}
+err_get:
+ kfree(blks);
return ret;
}
@@ -154,13 +209,12 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
struct nvm_system_block *sblk)
{
struct nvm_system_block *cur;
- int pg, cursz, ret, found = 0;
+ int pg, ret, found = 0;
/* the full buffer for a flash page is allocated. Only the first part of it
 * contains the system block information
*/
- cursz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
- cur = kmalloc(cursz, GFP_KERNEL);
+ cur = kmalloc(dev->pfpg_size, GFP_KERNEL);
if (!cur)
return -ENOMEM;
@@ -169,7 +223,7 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
ppa->g.pg = ppa_to_slc(dev, pg);
ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
- cur, cursz);
+ cur, dev->pfpg_size);
if (ret) {
if (ret == NVM_RSP_ERR_EMPTYPAGE) {
pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
@@ -223,10 +277,10 @@ static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
memset(&rqd, 0, sizeof(struct nvm_rq));
- nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas);
+ nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
nvm_generic_to_addr_mode(dev, &rqd);
- ret = dev->ops->set_bb_tbl(dev, &rqd, type);
+ ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
nvm_free_rqd_ppalist(dev, &rqd);
if (ret) {
pr_err("nvm: sysblk failed bb mark\n");
@@ -236,50 +290,17 @@ static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
return 0;
}
-static int sysblk_get_free_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
- void *private)
-{
- struct sysblk_scan *s = private;
- struct ppa_addr *sppa;
- int i, blkid = 0;
-
- for (i = 0; i < nr_blks; i++) {
- if (blks[i] == NVM_BLK_T_HOST)
- return -EEXIST;
-
- if (blks[i] != NVM_BLK_T_FREE)
- continue;
-
- sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
- sppa->g.ch = ppa.g.ch;
- sppa->g.lun = ppa.g.lun;
- sppa->g.blk = i;
- s->nr_ppas++;
- blkid++;
-
- pr_debug("nvm: use (%u %u %u) as sysblk\n",
- sppa->g.ch, sppa->g.lun, sppa->g.blk);
- if (blkid > MAX_BLKS_PR_SYSBLK - 1)
- return 0;
- }
-
- pr_err("nvm: sysblk failed get sysblk\n");
- return -EINVAL;
-}
-
static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
struct sysblk_scan *s)
{
struct nvm_system_block nvmsb;
void *buf;
- int i, sect, ret, bufsz;
+ int i, sect, ret = 0;
struct ppa_addr *ppas;
nvm_cpu_to_sysblk(&nvmsb, info);
- /* buffer for flash page */
- bufsz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
- buf = kzalloc(bufsz, GFP_KERNEL);
+ buf = kzalloc(dev->pfpg_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
@@ -309,7 +330,7 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
}
ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
- NVM_IO_SLC_MODE, buf, bufsz);
+ NVM_IO_SLC_MODE, buf, dev->pfpg_size);
if (ret) {
pr_err("nvm: sysblk failed program (%u %u %u)\n",
ppas[0].g.ch,
@@ -319,7 +340,7 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
}
ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
- NVM_IO_SLC_MODE, buf, bufsz);
+ NVM_IO_SLC_MODE, buf, dev->pfpg_size);
if (ret) {
pr_err("nvm: sysblk failed read (%u %u %u)\n",
ppas[0].g.ch,
@@ -388,7 +409,7 @@ int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
if (ret)
goto err_sysblk;
@@ -448,7 +469,7 @@ int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
if (ret)
goto err_sysblk;
@@ -546,7 +567,7 @@ int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_free_blks);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 1);
if (ret)
goto err_mark;
@@ -561,52 +582,49 @@ err_mark:
return ret;
}
-struct factory_blks {
- struct nvm_dev *dev;
- int flags;
- unsigned long *blks;
-};
-
static int factory_nblks(int nblks)
{
/* Round up to nearest BITS_PER_LONG */
return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}
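
For illustration, with BITS_PER_LONG == 64 the round-up above gives:

/* factory_nblks(1000) == (1000 + 63) & ~63 == 1024;  factory_nblks(1024) == 1024 */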
-static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
+static unsigned int factory_blk_offset(struct nvm_dev *dev, struct ppa_addr ppa)
{
int nblks = factory_nblks(dev->blks_per_lun);
- return ((ch * dev->luns_per_chnl * nblks) + (lun * nblks)) /
+ return ((ppa.g.ch * dev->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
BITS_PER_LONG;
}
-static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
- void *private)
+static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks,
+ unsigned long *blk_bitmap, int flags)
{
- struct factory_blks *f = private;
- struct nvm_dev *dev = f->dev;
int i, lunoff;
- lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun);
+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
+ lunoff = factory_blk_offset(dev, ppa);
/* non-set bits correspond to blocks that must be erased */
for (i = 0; i < nr_blks; i++) {
switch (blks[i]) {
case NVM_BLK_T_FREE:
- if (f->flags & NVM_FACTORY_ERASE_ONLY_USER)
- set_bit(i, &f->blks[lunoff]);
+ if (flags & NVM_FACTORY_ERASE_ONLY_USER)
+ set_bit(i, &blk_bitmap[lunoff]);
break;
case NVM_BLK_T_HOST:
- if (!(f->flags & NVM_FACTORY_RESET_HOST_BLKS))
- set_bit(i, &f->blks[lunoff]);
+ if (!(flags & NVM_FACTORY_RESET_HOST_BLKS))
+ set_bit(i, &blk_bitmap[lunoff]);
break;
case NVM_BLK_T_GRWN_BAD:
- if (!(f->flags & NVM_FACTORY_RESET_GRWN_BBLKS))
- set_bit(i, &f->blks[lunoff]);
+ if (!(flags & NVM_FACTORY_RESET_GRWN_BBLKS))
+ set_bit(i, &blk_bitmap[lunoff]);
break;
default:
- set_bit(i, &f->blks[lunoff]);
+ set_bit(i, &blk_bitmap[lunoff]);
break;
}
}
@@ -615,7 +633,7 @@ static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
}
static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
- int max_ppas, struct factory_blks *f)
+ int max_ppas, unsigned long *blk_bitmap)
{
struct ppa_addr ppa;
int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
@@ -623,111 +641,95 @@ static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
while (!done) {
done = 1;
- for (ch = 0; ch < dev->nr_chnls; ch++) {
- for (lun = 0; lun < dev->luns_per_chnl; lun++) {
- idx = factory_blk_offset(dev, ch, lun);
- offset = &f->blks[idx];
-
- blkid = find_first_zero_bit(offset,
- dev->blks_per_lun);
- if (blkid >= dev->blks_per_lun)
- continue;
- set_bit(blkid, offset);
-
- ppa.ppa = 0;
- ppa.g.ch = ch;
- ppa.g.lun = lun;
- ppa.g.blk = blkid;
- pr_debug("nvm: erase ppa (%u %u %u)\n",
- ppa.g.ch,
- ppa.g.lun,
- ppa.g.blk);
-
- erase_list[ppa_cnt] = ppa;
- ppa_cnt++;
- done = 0;
-
- if (ppa_cnt == max_ppas)
- return ppa_cnt;
- }
+ nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
+ idx = factory_blk_offset(dev, ppa);
+ offset = &blk_bitmap[idx];
+
+ blkid = find_first_zero_bit(offset,
+ dev->blks_per_lun);
+ if (blkid >= dev->blks_per_lun)
+ continue;
+ set_bit(blkid, offset);
+
+ ppa.g.blk = blkid;
+ pr_debug("nvm: erase ppa (%u %u %u)\n",
+ ppa.g.ch,
+ ppa.g.lun,
+ ppa.g.blk);
+
+ erase_list[ppa_cnt] = ppa;
+ ppa_cnt++;
+ done = 0;
+
+ if (ppa_cnt == max_ppas)
+ return ppa_cnt;
}
}
return ppa_cnt;
}
-static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa,
- nvm_bb_update_fn *fn, void *priv)
+static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
+ int flags)
{
- struct ppa_addr dev_ppa;
- int ret;
+ struct ppa_addr ppa;
+ int ch, lun, nr_blks, ret = 0;
+ u8 *blks;
- dev_ppa = generic_to_dev_addr(dev, ppa);
+ nr_blks = dev->blks_per_lun * dev->plane_mode;
+ blks = kmalloc(nr_blks, GFP_KERNEL);
+ if (!blks)
+ return -ENOMEM;
- ret = dev->ops->get_bb_tbl(dev, dev_ppa, dev->blks_per_lun, fn, priv);
- if (ret)
- pr_err("nvm: failed bb tbl for ch%u lun%u\n",
+ nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
+ ret = nvm_get_bb_tbl(dev, ppa, blks);
+ if (ret)
+ pr_err("nvm: failed bb tbl for ch%u lun%u\n",
ppa.g.ch, ppa.g.blk);
- return ret;
-}
-static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f)
-{
- int ch, lun, ret;
- struct ppa_addr ppa;
-
- ppa.ppa = 0;
- for (ch = 0; ch < dev->nr_chnls; ch++) {
- for (lun = 0; lun < dev->luns_per_chnl; lun++) {
- ppa.g.ch = ch;
- ppa.g.lun = lun;
-
- ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks,
- f);
- if (ret)
- return ret;
- }
+ ret = nvm_factory_blks(dev, ppa, blks, nr_blks, blk_bitmap,
+ flags);
+ if (ret)
+ break;
}
- return 0;
+ kfree(blks);
+ return ret;
}
int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
- struct factory_blks f;
struct ppa_addr *ppas;
int ppa_cnt, ret = -ENOMEM;
int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
struct sysblk_scan s;
+ unsigned long *blk_bitmap;
- f.blks = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
+ blk_bitmap = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
GFP_KERNEL);
- if (!f.blks)
+ if (!blk_bitmap)
return ret;
ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
if (!ppas)
goto err_blks;
- f.dev = dev;
- f.flags = flags;
-
/* create list of blks to be erased */
- ret = nvm_fact_select_blks(dev, &f);
+ ret = nvm_fact_select_blks(dev, blk_bitmap, flags);
if (ret)
goto err_ppas;
/* continue to erase until the list of blks is empty */
- while ((ppa_cnt = nvm_fact_get_blks(dev, ppas, max_ppas, &f)) > 0)
+ while ((ppa_cnt =
+ nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
nvm_erase_ppa(dev, ppas, ppa_cnt);
/* mark host reserved blocks free */
if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas,
- sysblk_get_host_blks);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
if (!ret)
ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
mutex_unlock(&dev->mlock);
@@ -735,7 +737,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags)
err_ppas:
kfree(ppas);
err_blks:
- kfree(f.blks);
+ kfree(blk_bitmap);
return ret;
}
EXPORT_SYMBOL(nvm_dev_factory);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index a296425a7270..f5dbb4e884d8 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -816,7 +816,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);
- blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
+ blk_queue_write_cache(q, true, true);
return 0;
}
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 2adf81d81fca..2c7ca258c4e4 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1723,7 +1723,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
if (!dmi) {
unsigned noio_flag;
noio_flag = memalloc_noio_save();
- dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH | __GFP_HIGHMEM, PAGE_KERNEL);
+ dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM, PAGE_KERNEL);
memalloc_noio_restore(noio_flag);
if (dmi)
*param_flags |= DM_PARAMS_VMALLOC;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 677ba223e2ae..52baf8a5b0f4 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -76,26 +76,18 @@ struct multipath {
wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
- unsigned pg_init_in_progress; /* Only one pg_init allowed at once */
-
- unsigned nr_valid_paths; /* Total number of usable paths */
struct pgpath *current_pgpath;
struct priority_group *current_pg;
struct priority_group *next_pg; /* Switch to this PG if set */
- bool queue_io:1; /* Must we queue all I/O? */
- bool queue_if_no_path:1; /* Queue I/O if last path fails? */
- bool saved_queue_if_no_path:1; /* Saved state during suspension */
- bool retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
- bool pg_init_disabled:1; /* pg_init is not currently allowed */
- bool pg_init_required:1; /* pg_init needs calling? */
- bool pg_init_delay_retry:1; /* Delay pg_init retry? */
+ unsigned long flags; /* Multipath state flags */
unsigned pg_init_retries; /* Number of times to retry pg_init */
- unsigned pg_init_count; /* Number of times pg_init called */
unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
- struct work_struct trigger_event;
+ atomic_t nr_valid_paths; /* Total number of usable paths */
+ atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
+ atomic_t pg_init_count; /* Number of times pg_init called */
/*
* We must use a mempool of dm_mpath_io structs so that we
@@ -104,6 +96,7 @@ struct multipath {
mempool_t *mpio_pool;
struct mutex work_mutex;
+ struct work_struct trigger_event;
};
/*
@@ -122,6 +115,17 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
+/*-----------------------------------------------
+ * Multipath state flags.
+ *-----------------------------------------------*/
+
+#define MPATHF_QUEUE_IO 0 /* Must we queue all I/O? */
+#define MPATHF_QUEUE_IF_NO_PATH 1 /* Queue I/O if last path fails? */
+#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2 /* Saved state during suspension */
+#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3 /* If there's already a hw_handler present, don't change it. */
+#define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */
+#define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */
+#define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? */
/*-----------------------------------------------
* Allocation routines
@@ -189,7 +193,10 @@ static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
if (m) {
INIT_LIST_HEAD(&m->priority_groups);
spin_lock_init(&m->lock);
- m->queue_io = true;
+ set_bit(MPATHF_QUEUE_IO, &m->flags);
+ atomic_set(&m->nr_valid_paths, 0);
+ atomic_set(&m->pg_init_in_progress, 0);
+ atomic_set(&m->pg_init_count, 0);
m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
INIT_WORK(&m->trigger_event, trigger_event);
init_waitqueue_head(&m->pg_init_wait);
@@ -274,17 +281,17 @@ static int __pg_init_all_paths(struct multipath *m)
struct pgpath *pgpath;
unsigned long pg_init_delay = 0;
- if (m->pg_init_in_progress || m->pg_init_disabled)
+ if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
return 0;
- m->pg_init_count++;
- m->pg_init_required = false;
+ atomic_inc(&m->pg_init_count);
+ clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
/* Check here to reset pg_init_required */
if (!m->current_pg)
return 0;
- if (m->pg_init_delay_retry)
+ if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
@@ -293,65 +300,99 @@ static int __pg_init_all_paths(struct multipath *m)
continue;
if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
pg_init_delay))
- m->pg_init_in_progress++;
+ atomic_inc(&m->pg_init_in_progress);
}
- return m->pg_init_in_progress;
+ return atomic_read(&m->pg_init_in_progress);
+}
+
+static int pg_init_all_paths(struct multipath *m)
+{
+ int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m->lock, flags);
+ r = __pg_init_all_paths(m);
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ return r;
}
-static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
+static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
- m->current_pg = pgpath->pg;
+ m->current_pg = pg;
/* Must we initialise the PG first, and queue I/O till it's ready? */
if (m->hw_handler_name) {
- m->pg_init_required = true;
- m->queue_io = true;
+ set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
+ set_bit(MPATHF_QUEUE_IO, &m->flags);
} else {
- m->pg_init_required = false;
- m->queue_io = false;
+ clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
+ clear_bit(MPATHF_QUEUE_IO, &m->flags);
}
- m->pg_init_count = 0;
+ atomic_set(&m->pg_init_count, 0);
}
-static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
- size_t nr_bytes)
+static struct pgpath *choose_path_in_pg(struct multipath *m,
+ struct priority_group *pg,
+ size_t nr_bytes)
{
+ unsigned long flags;
struct dm_path *path;
+ struct pgpath *pgpath;
path = pg->ps.type->select_path(&pg->ps, nr_bytes);
if (!path)
- return -ENXIO;
+ return ERR_PTR(-ENXIO);
- m->current_pgpath = path_to_pgpath(path);
+ pgpath = path_to_pgpath(path);
- if (m->current_pg != pg)
- __switch_pg(m, m->current_pgpath);
+ if (unlikely(lockless_dereference(m->current_pg) != pg)) {
+ /* Only update current_pgpath if pg changed */
+ spin_lock_irqsave(&m->lock, flags);
+ m->current_pgpath = pgpath;
+ __switch_pg(m, pg);
+ spin_unlock_irqrestore(&m->lock, flags);
+ }
- return 0;
+ return pgpath;
}
-static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
+static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
+ unsigned long flags;
struct priority_group *pg;
+ struct pgpath *pgpath;
bool bypassed = true;
- if (!m->nr_valid_paths) {
- m->queue_io = false;
+ if (!atomic_read(&m->nr_valid_paths)) {
+ clear_bit(MPATHF_QUEUE_IO, &m->flags);
goto failed;
}
/* Were we instructed to switch PG? */
- if (m->next_pg) {
+ if (lockless_dereference(m->next_pg)) {
+ spin_lock_irqsave(&m->lock, flags);
pg = m->next_pg;
+ if (!pg) {
+ spin_unlock_irqrestore(&m->lock, flags);
+ goto check_current_pg;
+ }
m->next_pg = NULL;
- if (!__choose_path_in_pg(m, pg, nr_bytes))
- return;
+ spin_unlock_irqrestore(&m->lock, flags);
+ pgpath = choose_path_in_pg(m, pg, nr_bytes);
+ if (!IS_ERR_OR_NULL(pgpath))
+ return pgpath;
}
/* Don't change PG until it has no remaining paths */
- if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
- return;
+check_current_pg:
+ pg = lockless_dereference(m->current_pg);
+ if (pg) {
+ pgpath = choose_path_in_pg(m, pg, nr_bytes);
+ if (!IS_ERR_OR_NULL(pgpath))
+ return pgpath;
+ }
/*
* Loop through priority groups until we find a valid path.
@@ -363,34 +404,38 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
list_for_each_entry(pg, &m->priority_groups, list) {
if (pg->bypassed == bypassed)
continue;
- if (!__choose_path_in_pg(m, pg, nr_bytes)) {
+ pgpath = choose_path_in_pg(m, pg, nr_bytes);
+ if (!IS_ERR_OR_NULL(pgpath)) {
if (!bypassed)
- m->pg_init_delay_retry = true;
- return;
+ set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
+ return pgpath;
}
}
} while (bypassed--);
failed:
+ spin_lock_irqsave(&m->lock, flags);
m->current_pgpath = NULL;
m->current_pg = NULL;
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ return NULL;
}
/*
* Check whether bios must be queued in the device-mapper core rather
* than here in the target.
*
- * m->lock must be held on entry.
- *
* If m->queue_if_no_path and m->saved_queue_if_no_path hold the
* same value then we are not between multipath_presuspend()
* and multipath_resume() calls and we have no need to check
* for the DMF_NOFLUSH_SUSPENDING flag.
*/
-static int __must_push_back(struct multipath *m)
+static int must_push_back(struct multipath *m)
{
- return (m->queue_if_no_path ||
- (m->queue_if_no_path != m->saved_queue_if_no_path &&
+ return (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
+ ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
dm_noflush_suspending(m->ti)));
}
@@ -408,35 +453,31 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
struct block_device *bdev;
struct dm_mpath_io *mpio;
- spin_lock_irq(&m->lock);
-
/* Do we need to select a new pgpath? */
- if (!m->current_pgpath || !m->queue_io)
- __choose_pgpath(m, nr_bytes);
-
- pgpath = m->current_pgpath;
+ pgpath = lockless_dereference(m->current_pgpath);
+ if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
+ pgpath = choose_pgpath(m, nr_bytes);
if (!pgpath) {
- if (!__must_push_back(m))
+ if (!must_push_back(m))
r = -EIO; /* Failed */
- goto out_unlock;
- } else if (m->queue_io || m->pg_init_required) {
- __pg_init_all_paths(m);
- goto out_unlock;
+ return r;
+ } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
+ test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
+ pg_init_all_paths(m);
+ return r;
}
mpio = set_mpio(m, map_context);
if (!mpio)
/* ENOMEM, requeue */
- goto out_unlock;
+ return r;
mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes;
bdev = pgpath->path.dev->bdev;
- spin_unlock_irq(&m->lock);
-
if (clone) {
/*
* Old request-based interface: allocated clone is passed in.
@@ -468,11 +509,6 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
&pgpath->path,
nr_bytes);
return DM_MAPIO_REMAPPED;
-
-out_unlock:
- spin_unlock_irq(&m->lock);
-
- return r;
}
static int multipath_map(struct dm_target *ti, struct request *clone,
@@ -503,11 +539,22 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
spin_lock_irqsave(&m->lock, flags);
- if (save_old_value)
- m->saved_queue_if_no_path = m->queue_if_no_path;
+ if (save_old_value) {
+ if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+ set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+ else
+ clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+ } else {
+ if (queue_if_no_path)
+ set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+ else
+ clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+ }
+ if (queue_if_no_path)
+ set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
else
- m->saved_queue_if_no_path = queue_if_no_path;
- m->queue_if_no_path = queue_if_no_path;
+ clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+
spin_unlock_irqrestore(&m->lock, flags);
if (!queue_if_no_path)
@@ -600,10 +647,10 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
goto bad;
}
- if (m->retain_attached_hw_handler || m->hw_handler_name)
+ if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
q = bdev_get_queue(p->path.dev->bdev);
- if (m->retain_attached_hw_handler) {
+ if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
if (attached_handler_name) {
@@ -808,7 +855,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
}
if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
- m->retain_attached_hw_handler = true;
+ set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
continue;
}
@@ -884,6 +931,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
/* parse the priority groups */
while (as.argc) {
struct priority_group *pg;
+ unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
pg = parse_priority_group(&as, m);
if (IS_ERR(pg)) {
@@ -891,7 +939,9 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
goto bad;
}
- m->nr_valid_paths += pg->nr_pgpaths;
+ nr_valid_paths += pg->nr_pgpaths;
+ atomic_set(&m->nr_valid_paths, nr_valid_paths);
+
list_add_tail(&pg->list, &m->priority_groups);
pg_count++;
pg->pg_num = pg_count;
@@ -921,19 +971,14 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
DECLARE_WAITQUEUE(wait, current);
- unsigned long flags;
add_wait_queue(&m->pg_init_wait, &wait);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
- spin_lock_irqsave(&m->lock, flags);
- if (!m->pg_init_in_progress) {
- spin_unlock_irqrestore(&m->lock, flags);
+ if (!atomic_read(&m->pg_init_in_progress))
break;
- }
- spin_unlock_irqrestore(&m->lock, flags);
io_schedule();
}
@@ -944,20 +989,16 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
static void flush_multipath_work(struct multipath *m)
{
- unsigned long flags;
-
- spin_lock_irqsave(&m->lock, flags);
- m->pg_init_disabled = true;
- spin_unlock_irqrestore(&m->lock, flags);
+ set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+ smp_mb__after_atomic();
flush_workqueue(kmpath_handlerd);
multipath_wait_for_pg_init_completion(m);
flush_workqueue(kmultipathd);
flush_work(&m->trigger_event);
- spin_lock_irqsave(&m->lock, flags);
- m->pg_init_disabled = false;
- spin_unlock_irqrestore(&m->lock, flags);
+ clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+ smp_mb__after_atomic();
}
static void multipath_dtr(struct dm_target *ti)
@@ -987,13 +1028,13 @@ static int fail_path(struct pgpath *pgpath)
pgpath->is_active = false;
pgpath->fail_count++;
- m->nr_valid_paths--;
+ atomic_dec(&m->nr_valid_paths);
if (pgpath == m->current_pgpath)
m->current_pgpath = NULL;
dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
- pgpath->path.dev->name, m->nr_valid_paths);
+ pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
schedule_work(&m->trigger_event);
@@ -1011,6 +1052,7 @@ static int reinstate_path(struct pgpath *pgpath)
int r = 0, run_queue = 0;
unsigned long flags;
struct multipath *m = pgpath->pg->m;
+ unsigned nr_valid_paths;
spin_lock_irqsave(&m->lock, flags);
@@ -1025,16 +1067,17 @@ static int reinstate_path(struct pgpath *pgpath)
pgpath->is_active = true;
- if (!m->nr_valid_paths++) {
+ nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
+ if (nr_valid_paths == 1) {
m->current_pgpath = NULL;
run_queue = 1;
} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
- m->pg_init_in_progress++;
+ atomic_inc(&m->pg_init_in_progress);
}
dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
- pgpath->path.dev->name, m->nr_valid_paths);
+ pgpath->path.dev->name, nr_valid_paths);
schedule_work(&m->trigger_event);
@@ -1152,8 +1195,9 @@ static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
spin_lock_irqsave(&m->lock, flags);
- if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
- m->pg_init_required = true;
+ if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
+ !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
+ set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
else
limit_reached = true;
@@ -1219,19 +1263,23 @@ static void pg_init_done(void *data, int errors)
m->current_pgpath = NULL;
m->current_pg = NULL;
}
- } else if (!m->pg_init_required)
+ } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
pg->bypassed = false;
- if (--m->pg_init_in_progress)
+ if (atomic_dec_return(&m->pg_init_in_progress) > 0)
/* Activations of other paths are still ongoing */
goto out;
- if (m->pg_init_required) {
- m->pg_init_delay_retry = delay_retry;
+ if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
+ if (delay_retry)
+ set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
+ else
+ clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
+
if (__pg_init_all_paths(m))
goto out;
}
- m->queue_io = false;
+ clear_bit(MPATHF_QUEUE_IO, &m->flags);
/*
* Wake up any thread waiting to suspend.
@@ -1287,7 +1335,6 @@ static int do_end_io(struct multipath *m, struct request *clone,
* clone bios for it and resubmit it later.
*/
int r = DM_ENDIO_REQUEUE;
- unsigned long flags;
if (!error && !clone->errors)
return 0; /* I/O complete */
@@ -1298,17 +1345,15 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (mpio->pgpath)
fail_path(mpio->pgpath);
- spin_lock_irqsave(&m->lock, flags);
- if (!m->nr_valid_paths) {
- if (!m->queue_if_no_path) {
- if (!__must_push_back(m))
+ if (!atomic_read(&m->nr_valid_paths)) {
+ if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+ if (!must_push_back(m))
r = -EIO;
} else {
if (error == -EBADE)
r = error;
}
}
- spin_unlock_irqrestore(&m->lock, flags);
return r;
}
@@ -1364,11 +1409,12 @@ static void multipath_postsuspend(struct dm_target *ti)
static void multipath_resume(struct dm_target *ti)
{
struct multipath *m = ti->private;
- unsigned long flags;
- spin_lock_irqsave(&m->lock, flags);
- m->queue_if_no_path = m->saved_queue_if_no_path;
- spin_unlock_irqrestore(&m->lock, flags);
+ if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
+ set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+ else
+ clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+ smp_mb__after_atomic();
}
/*
@@ -1402,19 +1448,20 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
/* Features */
if (type == STATUSTYPE_INFO)
- DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
+ DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
+ atomic_read(&m->pg_init_count));
else {
- DMEMIT("%u ", m->queue_if_no_path +
+ DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
(m->pg_init_retries > 0) * 2 +
(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
- m->retain_attached_hw_handler);
- if (m->queue_if_no_path)
+ test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags));
+ if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
DMEMIT("queue_if_no_path ");
if (m->pg_init_retries)
DMEMIT("pg_init_retries %u ", m->pg_init_retries);
if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
- if (m->retain_attached_hw_handler)
+ if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
DMEMIT("retain_attached_hw_handler ");
}
@@ -1563,18 +1610,17 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
struct block_device **bdev, fmode_t *mode)
{
struct multipath *m = ti->private;
- unsigned long flags;
+ struct pgpath *current_pgpath;
int r;
- spin_lock_irqsave(&m->lock, flags);
+ current_pgpath = lockless_dereference(m->current_pgpath);
+ if (!current_pgpath)
+ current_pgpath = choose_pgpath(m, 0);
- if (!m->current_pgpath)
- __choose_pgpath(m, 0);
-
- if (m->current_pgpath) {
- if (!m->queue_io) {
- *bdev = m->current_pgpath->path.dev->bdev;
- *mode = m->current_pgpath->path.dev->mode;
+ if (current_pgpath) {
+ if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
+ *bdev = current_pgpath->path.dev->bdev;
+ *mode = current_pgpath->path.dev->mode;
r = 0;
} else {
/* pg_init has not started or completed */
@@ -1582,23 +1628,19 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
}
} else {
/* No path is available */
- if (m->queue_if_no_path)
+ if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
r = -ENOTCONN;
else
r = -EIO;
}
- spin_unlock_irqrestore(&m->lock, flags);
-
if (r == -ENOTCONN) {
- spin_lock_irqsave(&m->lock, flags);
- if (!m->current_pg) {
+ if (!lockless_dereference(m->current_pg)) {
/* Path status changed, redo selection */
- __choose_pgpath(m, 0);
+ (void) choose_pgpath(m, 0);
}
- if (m->pg_init_required)
- __pg_init_all_paths(m);
- spin_unlock_irqrestore(&m->lock, flags);
+ if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
+ pg_init_all_paths(m);
dm_table_run_md_queue_async(m->ti->table);
}
@@ -1649,39 +1691,37 @@ static int multipath_busy(struct dm_target *ti)
{
bool busy = false, has_active = false;
struct multipath *m = ti->private;
- struct priority_group *pg;
+ struct priority_group *pg, *next_pg;
struct pgpath *pgpath;
- unsigned long flags;
-
- spin_lock_irqsave(&m->lock, flags);
/* pg_init in progress or no paths available */
- if (m->pg_init_in_progress ||
- (!m->nr_valid_paths && m->queue_if_no_path)) {
- busy = true;
- goto out;
- }
+ if (atomic_read(&m->pg_init_in_progress) ||
+ (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)))
+ return true;
+
/* Guess which priority_group will be used at next mapping time */
- if (unlikely(!m->current_pgpath && m->next_pg))
- pg = m->next_pg;
- else if (likely(m->current_pg))
- pg = m->current_pg;
- else
+ pg = lockless_dereference(m->current_pg);
+ next_pg = lockless_dereference(m->next_pg);
+ if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
+ pg = next_pg;
+
+ if (!pg) {
/*
* We don't know which pg will be used at next mapping time.
- * We don't call __choose_pgpath() here to avoid to trigger
+ * We don't call choose_pgpath() here to avoid triggering
* pg_init just by busy checking.
* So we don't know whether underlying devices we will be using
* at next mapping time are busy or not. Just try mapping.
*/
- goto out;
+ return busy;
+ }
/*
* If there is at least one non-busy active path, the path selector
* will be able to select it. So we consider such a pg as not busy.
*/
busy = true;
- list_for_each_entry(pgpath, &pg->pgpaths, list)
+ list_for_each_entry(pgpath, &pg->pgpaths, list) {
if (pgpath->is_active) {
has_active = true;
if (!pgpath_busy(pgpath)) {
@@ -1689,17 +1729,16 @@ static int multipath_busy(struct dm_target *ti)
break;
}
}
+ }
- if (!has_active)
+ if (!has_active) {
/*
* No active path in this pg, so this pg won't be used and
* the current_pg will be changed at next mapping time.
* We need to try mapping to determine it.
*/
busy = false;
-
-out:
- spin_unlock_irqrestore(&m->lock, flags);
+ }
return busy;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index a0901214aef5..52532745a50f 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1037,6 +1037,11 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
if (!mddev->events && super_init_validation(mddev, rdev))
return -EINVAL;
+ if (le32_to_cpu(sb->features)) {
+ rs->ti->error = "Unable to assemble array: No feature flags supported yet";
+ return -EINVAL;
+ }
+
/* Enable bitmap creation for RAID levels != 0 */
mddev->bitmap_info.offset = (rs->raid_type->level) ? to_sector(4096) : 0;
rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
@@ -1718,7 +1723,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 7, 0},
+ .version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f9e8f0bef332..626a5ec04466 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1348,13 +1348,13 @@ static void dm_table_verify_integrity(struct dm_table *t)
static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- unsigned flush = (*(unsigned *)data);
+ unsigned long flush = (unsigned long) data;
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && (q->flush_flags & flush);
+ return q && (q->queue_flags & flush);
}
-static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
+static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
{
struct dm_target *ti;
unsigned i = 0;
@@ -1375,7 +1375,7 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
return true;
if (ti->type->iterate_devices &&
- ti->type->iterate_devices(ti, device_flush_capable, &flush))
+ ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
return true;
}
@@ -1506,7 +1506,7 @@ static bool dm_table_supports_discards(struct dm_table *t)
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
- unsigned flush = 0;
+ bool wc = false, fua = false;
/*
* Copy table's limits to the DM device's request_queue
@@ -1518,12 +1518,12 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
- if (dm_table_supports_flush(t, REQ_FLUSH)) {
- flush |= REQ_FLUSH;
- if (dm_table_supports_flush(t, REQ_FUA))
- flush |= REQ_FUA;
+ if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
+ wc = true;
+ if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
+ fua = true;
}
- blk_queue_flush(q, flush);
+ blk_queue_write_cache(q, wc, fua);
if (!dm_table_discard_zeroes_data(t))
q->limits.discard_zeroes_data = 0;
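
Two conventions carry this dm-table change: the flag mask travels through the opaque void *data argument of the iterate_devices callback as a plain integer cast, and the result is advertised with blk_queue_write_cache() instead of the removed blk_queue_flush(). A condensed sketch, with hypothetical demo_* names and a simplified callback signature:

#include <linux/blkdev.h>

/* The real callback is an iterate_devices_callout_fn; this is condensed. */
static int demo_queue_has_flag(struct request_queue *q, void *data)
{
	unsigned long mask = (unsigned long)data;	/* cast back, no deref */

	return q && (q->queue_flags & mask);
}

static void demo_advertise_write_cache(struct request_queue *q,
				       bool wc, bool fua)
{
	/* wc: volatile write cache present; fua: FUA writes honoured */
	blk_queue_write_cache(q, wc, fua);
}

A caller would pass (void *)(1UL << QUEUE_FLAG_WC) as data, exactly as dm_table_supports_flush() now does above.
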
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 92237b6fa8cd..fc803d50f9f0 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -322,56 +322,6 @@ struct thin_c {
/*----------------------------------------------------------------*/
-/**
- * __blkdev_issue_discard_async - queue a discard with async completion
- * @bdev: blockdev to issue discard for
- * @sector: start sector
- * @nr_sects: number of sectors to discard
- * @gfp_mask: memory allocation flags (for bio_alloc)
- * @flags: BLKDEV_IFL_* flags to control behaviour
- * @parent_bio: parent discard bio that all sub discards get chained to
- *
- * Description:
- * Asynchronously issue a discard request for the sectors in question.
- */
-static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
- struct bio *parent_bio)
-{
- struct request_queue *q = bdev_get_queue(bdev);
- int type = REQ_WRITE | REQ_DISCARD;
- struct bio *bio;
-
- if (!q || !nr_sects)
- return -ENXIO;
-
- if (!blk_queue_discard(q))
- return -EOPNOTSUPP;
-
- if (flags & BLKDEV_DISCARD_SECURE) {
- if (!blk_queue_secdiscard(q))
- return -EOPNOTSUPP;
- type |= REQ_SECURE;
- }
-
- /*
- * Required bio_put occurs in bio_endio thanks to bio_chain below
- */
- bio = bio_alloc(gfp_mask, 1);
- if (!bio)
- return -ENOMEM;
-
- bio_chain(bio, parent_bio);
-
- bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = bdev;
- bio->bi_iter.bi_size = nr_sects << 9;
-
- submit_bio(type, bio);
-
- return 0;
-}
-
static bool block_size_is_power_of_two(struct pool *pool)
{
return pool->sectors_per_block_shift >= 0;
@@ -384,14 +334,55 @@ static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
(b * pool->sectors_per_block);
}
-static int issue_discard(struct thin_c *tc, dm_block_t data_b, dm_block_t data_e,
- struct bio *parent_bio)
+/*----------------------------------------------------------------*/
+
+struct discard_op {
+ struct thin_c *tc;
+ struct blk_plug plug;
+ struct bio *parent_bio;
+ struct bio *bio;
+};
+
+static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
+{
+ BUG_ON(!parent);
+
+ op->tc = tc;
+ blk_start_plug(&op->plug);
+ op->parent_bio = parent;
+ op->bio = NULL;
+}
+
+static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e)
{
+ struct thin_c *tc = op->tc;
sector_t s = block_to_sectors(tc->pool, data_b);
sector_t len = block_to_sectors(tc->pool, data_e - data_b);
- return __blkdev_issue_discard_async(tc->pool_dev->bdev, s, len,
- GFP_NOWAIT, 0, parent_bio);
+ return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
+ GFP_NOWAIT, REQ_WRITE | REQ_DISCARD, &op->bio);
+}
+
+static void end_discard(struct discard_op *op, int r)
+{
+ if (op->bio) {
+ /*
+ * Even if one of the calls to issue_discard failed, we
+ * need to wait for the chain to complete.
+ */
+ bio_chain(op->bio, op->parent_bio);
+ submit_bio(REQ_WRITE | REQ_DISCARD, op->bio);
+ }
+
+ blk_finish_plug(&op->plug);
+
+ /*
+ * Even if r is set, there could be sub discards in flight that we
+ * need to wait for.
+ */
+ if (r && !op->parent_bio->bi_error)
+ op->parent_bio->bi_error = r;
+ bio_endio(op->parent_bio);
}
/*----------------------------------------------------------------*/
@@ -632,7 +623,7 @@ static void error_retry_list(struct pool *pool)
{
int error = get_pool_io_error_code(pool);
- return error_retry_list_with_code(pool, error);
+ error_retry_list_with_code(pool, error);
}
/*
@@ -1006,24 +997,28 @@ static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
mempool_free(m, tc->pool->mapping_pool);
}
-static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
+/*----------------------------------------------------------------*/
+
+static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
{
/*
* We've already unmapped this range of blocks, but before we
* passdown we have to check that these blocks are now unused.
*/
- int r;
+ int r = 0;
bool used = true;
struct thin_c *tc = m->tc;
struct pool *pool = tc->pool;
dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
+ struct discard_op op;
+ begin_discard(&op, tc, m->bio);
while (b != end) {
/* find start of unmapped run */
for (; b < end; b++) {
r = dm_pool_block_is_used(pool->pmd, b, &used);
if (r)
- return r;
+ goto out;
if (!used)
break;
@@ -1036,20 +1031,20 @@ static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
for (e = b + 1; e != end; e++) {
r = dm_pool_block_is_used(pool->pmd, e, &used);
if (r)
- return r;
+ goto out;
if (used)
break;
}
- r = issue_discard(tc, b, e, m->bio);
+ r = issue_discard(&op, b, e);
if (r)
- return r;
+ goto out;
b = e;
}
-
- return 0;
+out:
+ end_discard(&op, r);
}
static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
@@ -1059,20 +1054,21 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
struct pool *pool = tc->pool;
r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
- if (r)
+ if (r) {
metadata_operation_failed(pool, "dm_thin_remove_range", r);
+ bio_io_error(m->bio);
- else if (m->maybe_shared)
- r = passdown_double_checking_shared_status(m);
- else
- r = issue_discard(tc, m->data_block, m->data_block + (m->virt_end - m->virt_begin), m->bio);
+ } else if (m->maybe_shared) {
+ passdown_double_checking_shared_status(m);
+
+ } else {
+ struct discard_op op;
+ begin_discard(&op, tc, m->bio);
+ r = issue_discard(&op, m->data_block,
+ m->data_block + (m->virt_end - m->virt_begin));
+ end_discard(&op, r);
+ }
- /*
- * Even if r is set, there could be sub discards in flight that we
- * need to wait for.
- */
- m->bio->bi_error = r;
- bio_endio(m->bio);
cell_defer_no_holder(tc, m->cell);
mempool_free(m, pool->mapping_pool);
}
@@ -1494,17 +1490,6 @@ static void process_discard_cell_no_passdown(struct thin_c *tc,
pool->process_prepared_discard(m);
}
-/*
- * __bio_inc_remaining() is used to defer parent bios's end_io until
- * we _know_ all chained sub range discard bios have completed.
- */
-static inline void __bio_inc_remaining(struct bio *bio)
-{
- bio->bi_flags |= (1 << BIO_CHAIN);
- smp_mb__before_atomic();
- atomic_inc(&bio->__bi_remaining);
-}
-
static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
struct bio *bio)
{
@@ -1554,13 +1539,13 @@ static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t
/*
* The parent bio must not complete before sub discard bios are
- * chained to it (see __blkdev_issue_discard_async's bio_chain)!
+ * chained to it (see end_discard's bio_chain)!
*
* This per-mapping bi_remaining increment is paired with
* the implicit decrement that occurs via bio_endio() in
- * process_prepared_discard_{passdown,no_passdown}.
+ * end_discard().
*/
- __bio_inc_remaining(bio);
+ bio_inc_remaining(bio);
if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
pool->process_prepared_discard(m);
@@ -3899,7 +3884,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 18, 0},
+ .version = {1, 19, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -4273,7 +4258,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 18, 0},
+ .version = {1, 19, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
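
The discard_op helpers introduced above bracket a run of sub-discards: begin_discard() opens a plug and records the parent bio, issue_discard() lets __blkdev_issue_discard() accumulate chained bios, and end_discard() ties the chain to the parent and completes it. A self-contained sketch of that chaining scheme; demo_passdown is hypothetical, and the parent bio is assumed to have been bumped with bio_inc_remaining() beforehand, as break_up_discard_bio() does:

#include <linux/bio.h>
#include <linux/blkdev.h>

static int demo_passdown(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, struct bio *parent)
{
	struct bio *chain = NULL;
	int r;

	/* Accumulate one or more discard bios into 'chain'. */
	r = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOWAIT,
				   REQ_WRITE | REQ_DISCARD, &chain);
	if (chain) {
		/* The parent completes only after every chained bio does. */
		bio_chain(chain, parent);
		submit_bio(REQ_WRITE | REQ_DISCARD, chain);
	}

	if (r && !parent->bi_error)
		parent->bi_error = r;
	bio_endio(parent);
	return r;
}
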
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3d3ac13287a4..1b2f96205361 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -674,7 +674,7 @@ static void free_io(struct mapped_device *md, struct dm_io *io)
mempool_free(io, md->io_pool);
}
-static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
+static void free_tio(struct dm_target_io *tio)
{
bio_put(&tio->clone);
}
@@ -1055,7 +1055,7 @@ static void clone_endio(struct bio *bio)
!bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
disable_write_same(md);
- free_tio(md, tio);
+ free_tio(tio);
dec_pending(io, error);
}
@@ -1517,7 +1517,6 @@ static void __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
- struct mapped_device *md;
struct bio *clone = &tio->clone;
struct dm_target *ti = tio->ti;
@@ -1540,9 +1539,8 @@ static void __map_bio(struct dm_target_io *tio)
generic_make_request(clone);
} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
/* error the io and bail out, or requeue it if needed */
- md = tio->io->md;
dec_pending(tio->io, r);
- free_tio(md, tio);
+ free_tio(tio);
} else if (r != DM_MAPIO_SUBMITTED) {
DMWARN("unimplemented target map return value: %d", r);
BUG();
@@ -1663,7 +1661,7 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
tio->len_ptr = len;
r = clone_bio(tio, bio, sector, *len);
if (r < 0) {
- free_tio(ci->md, tio);
+ free_tio(tio);
break;
}
__map_bio(tio);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 14d3b37944df..c9a475c33cc7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5039,7 +5039,7 @@ static int md_alloc(dev_t dev, char *name)
disk->fops = &md_fops;
disk->private_data = mddev;
disk->queue = mddev->queue;
- blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
+ blk_queue_write_cache(mddev->queue, true, true);
/* Allow extended partitions. This makes the
* 'mdp' device redundant, but we can't really
* remove it now.
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 9531f5f05b93..26f14970a858 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1188,6 +1188,7 @@ ioerr:
int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
+ struct request_queue *q = bdev_get_queue(rdev->bdev);
struct r5l_log *log;
if (PAGE_SIZE != 4096)
@@ -1197,7 +1198,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
return -ENOMEM;
log->rdev = rdev;
- log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);
+ log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
sizeof(rdev->mddev->uuid));
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
index 5a8d9c766633..7ddc4a9563ea 100644
--- a/drivers/mfd/intel-lpss-acpi.c
+++ b/drivers/mfd/intel-lpss-acpi.c
@@ -31,13 +31,9 @@ static struct property_entry spt_i2c_properties[] = {
{ },
};
-static struct property_set spt_i2c_pset = {
- .properties = spt_i2c_properties,
-};
-
static const struct intel_lpss_platform_info spt_i2c_info = {
.clk_rate = 120000000,
- .pset = &spt_i2c_pset,
+ .properties = spt_i2c_properties,
};
static const struct intel_lpss_platform_info bxt_info = {
@@ -51,13 +47,9 @@ static struct property_entry bxt_i2c_properties[] = {
{ },
};
-static struct property_set bxt_i2c_pset = {
- .properties = bxt_i2c_properties,
-};
-
static const struct intel_lpss_platform_info bxt_i2c_info = {
.clk_rate = 133000000,
- .pset = &bxt_i2c_pset,
+ .properties = bxt_i2c_properties,
};
static const struct acpi_device_id intel_lpss_acpi_ids[] = {
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index a19e57118641..1d79a3c9370f 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -71,13 +71,9 @@ static struct property_entry spt_i2c_properties[] = {
{ },
};
-static struct property_set spt_i2c_pset = {
- .properties = spt_i2c_properties,
-};
-
static const struct intel_lpss_platform_info spt_i2c_info = {
.clk_rate = 120000000,
- .pset = &spt_i2c_pset,
+ .properties = spt_i2c_properties,
};
static struct property_entry uart_properties[] = {
@@ -87,14 +83,10 @@ static struct property_entry uart_properties[] = {
{ },
};
-static struct property_set uart_pset = {
- .properties = uart_properties,
-};
-
static const struct intel_lpss_platform_info spt_uart_info = {
.clk_rate = 120000000,
.clk_con_id = "baudclk",
- .pset = &uart_pset,
+ .properties = uart_properties,
};
static const struct intel_lpss_platform_info bxt_info = {
@@ -104,7 +96,7 @@ static const struct intel_lpss_platform_info bxt_info = {
static const struct intel_lpss_platform_info bxt_uart_info = {
.clk_rate = 100000000,
.clk_con_id = "baudclk",
- .pset = &uart_pset,
+ .properties = uart_properties,
};
static struct property_entry bxt_i2c_properties[] = {
@@ -114,13 +106,9 @@ static struct property_entry bxt_i2c_properties[] = {
{ },
};
-static struct property_set bxt_i2c_pset = {
- .properties = bxt_i2c_properties,
-};
-
static const struct intel_lpss_platform_info bxt_i2c_info = {
.clk_rate = 133000000,
- .pset = &bxt_i2c_pset,
+ .properties = bxt_i2c_properties,
};
static const struct pci_device_id intel_lpss_pci_ids[] = {
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 1bbbe877ba7e..6352aaba96a4 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -407,7 +407,7 @@ int intel_lpss_probe(struct device *dev,
if (ret)
return ret;
- lpss->cell->pset = info->pset;
+ lpss->cell->properties = info->properties;
intel_lpss_init_dev(lpss);
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h
index 0dcea9eb2d03..694116630ffa 100644
--- a/drivers/mfd/intel-lpss.h
+++ b/drivers/mfd/intel-lpss.h
@@ -16,14 +16,14 @@
struct device;
struct resource;
-struct property_set;
+struct property_entry;
struct intel_lpss_platform_info {
struct resource *mem;
int irq;
unsigned long clk_rate;
const char *clk_con_id;
- struct property_set *pset;
+ struct property_entry *properties;
};
int intel_lpss_probe(struct device *dev,
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 88bd1b1e47be..fc1c1fc13813 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -193,8 +193,8 @@ static int mfd_add_device(struct device *parent, int id,
goto fail_alias;
}
- if (cell->pset) {
- ret = platform_device_add_properties(pdev, cell->pset);
+ if (cell->properties) {
+ ret = platform_device_add_properties(pdev, cell->properties);
if (ret)
goto fail_alias;
}
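
The mfd/intel-lpss hunks above all make the same move: the property_set wrapper goes away and a bare, zero-terminated property_entry array is handed straight to platform_device_add_properties(). A short sketch of the convention; the property names and demo_* identifiers are illustrative only:

#include <linux/platform_device.h>
#include <linux/property.h>

static struct property_entry demo_properties[] = {
	PROPERTY_ENTRY_U32("clock-frequency", 400000),
	PROPERTY_ENTRY_BOOL("demo,some-flag"),
	{ },	/* terminator */
};

static int demo_attach_properties(struct platform_device *pdev)
{
	/* The array itself is passed; no wrapper struct is needed. */
	return platform_device_add_properties(pdev, demo_properties);
}
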
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 967b9dd24fe9..030769018461 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -718,8 +718,8 @@ cberr:
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
void *mesg, int lines)
{
- unsigned long m, *val = mesg, gpa, save;
- int ret;
+ unsigned long m;
+ int ret, loops = 200; /* experimentally determined */
m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
if (lines == 2) {
@@ -735,22 +735,28 @@ static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
return MQE_OK;
/*
- * Send a cross-partition interrupt to the SSI that contains the target
- * message queue. Normally, the interrupt is automatically delivered by
- * hardware but some error conditions require explicit delivery.
- * Use the GRU to deliver the interrupt. Otherwise partition failures
+ * Send a noop message in order to deliver a cross-partition interrupt
+ * to the SSI that contains the target message queue. Normally, the
+ * interrupt is automatically delivered by hardware following mesq
+ * operations, but some error conditions require explicit delivery.
+ * The noop message will trigger delivery. Otherwise partition failures
* could cause unrecovered errors.
*/
- gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode, UVH_IPI_INT);
- save = *val;
- *val = uv_hub_ipi_value(mqd->interrupt_apicid, mqd->interrupt_vector,
- dest_Fixed);
- gru_vstore_phys(cb, gpa, gru_get_tri(mesg), IAA_REGISTER, IMA);
- ret = gru_wait(cb);
- *val = save;
- if (ret != CBS_IDLE)
- return MQE_UNEXPECTED_CB_ERR;
- return MQE_OK;
+ do {
+ ret = send_noop_message(cb, mqd, mesg);
+ } while ((ret == MQIE_AGAIN || ret == MQE_CONGESTION) && (loops-- > 0));
+
+ if (ret == MQIE_AGAIN || ret == MQE_CONGESTION) {
+ /*
+ * Don't indicate to the app to resend the message, as it's
+ * already been successfully sent. We simply send an OK
+ * (rather than fail the send with MQE_UNEXPECTED_CB_ERR),
+ * assuming that the other side is receiving enough
+ * interrupts to get this message processed anyway.
+ */
+ ret = MQE_OK;
+ }
+ return ret;
}
/*
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 8a0147dfed27..ddc96206288a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -35,6 +35,7 @@
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
+#include <linux/idr.h>
#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
@@ -78,14 +79,14 @@ static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
/*
* We've only got one major, so number of mmcblk devices is
* limited to (1 << 20) / number of minors per device. It is also
- * currently limited by the size of the static bitmaps below.
+ * limited by the MAX_DEVICES below.
*/
static int max_devices;
#define MAX_DEVICES 256
-/* TODO: Replace these with struct ida */
-static DECLARE_BITMAP(dev_use, MAX_DEVICES);
+static DEFINE_IDA(mmc_blk_ida);
+static DEFINE_SPINLOCK(mmc_blk_lock);
/*
* There is one mmc_blk_data per slot.
@@ -178,7 +179,9 @@ static void mmc_blk_put(struct mmc_blk_data *md)
int devidx = mmc_get_devidx(md->disk);
blk_cleanup_queue(md->queue.queue);
- __clear_bit(devidx, dev_use);
+ spin_lock(&mmc_blk_lock);
+ ida_remove(&mmc_blk_ida, devidx);
+ spin_unlock(&mmc_blk_lock);
put_disk(md->disk);
kfree(md);
@@ -945,16 +948,22 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
req->rq_disk->disk_name, "timed out", name, status);
/* If the status cmd initially failed, retry the r/w cmd */
- if (!status_valid)
+ if (!status_valid) {
+ pr_err("%s: status not valid, retrying timeout\n",
+ req->rq_disk->disk_name);
return ERR_RETRY;
+ }
/*
* If it was a r/w cmd crc error, or illegal command
* (eg, issued in wrong state) then retry - we should
* have corrected the state problem above.
*/
- if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+ pr_err("%s: command error, retrying timeout\n",
+ req->rq_disk->disk_name);
return ERR_RETRY;
+ }
/* Otherwise abort the command */
return ERR_ABORT;
@@ -2189,10 +2198,23 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct mmc_blk_data *md;
int devidx, ret;
- devidx = find_first_zero_bit(dev_use, max_devices);
- if (devidx >= max_devices)
- return ERR_PTR(-ENOSPC);
- __set_bit(devidx, dev_use);
+again:
+ if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock(&mmc_blk_lock);
+ ret = ida_get_new(&mmc_blk_ida, &devidx);
+ spin_unlock(&mmc_blk_lock);
+
+ if (ret == -EAGAIN)
+ goto again;
+ else if (ret)
+ return ERR_PTR(ret);
+
+ if (devidx >= max_devices) {
+ ret = -ENOSPC;
+ goto out;
+ }
md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
if (!md) {
@@ -2271,7 +2293,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
card->ext_csd.rel_sectors)) {
md->flags |= MMC_BLK_REL_WR;
- blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+ blk_queue_write_cache(md->queue.queue, true, true);
}
if (mmc_card_mmc(card) &&
@@ -2289,6 +2311,9 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
err_kfree:
kfree(md);
out:
+ spin_lock(&mmc_blk_lock);
+ ida_remove(&mmc_blk_ida, devidx);
+ spin_unlock(&mmc_blk_lock);
return ERR_PTR(ret);
}
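
The mmc_blk IDA conversion above (and the matching mmc_alloc_host() change further down) uses the pre-ida_simple_get() calling convention: preload memory outside the lock, allocate under a spinlock, retry on -EAGAIN when the preloaded node was consumed by someone else, and free under the same lock. A minimal sketch with hypothetical demo_* names:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDA(demo_ida);
static DEFINE_SPINLOCK(demo_ida_lock);

static int demo_get_index(void)
{
	int id, ret;

again:
	if (!ida_pre_get(&demo_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&demo_ida_lock);
	ret = ida_get_new(&demo_ida, &id);
	spin_unlock(&demo_ida_lock);

	if (ret == -EAGAIN)
		goto again;	/* another caller used the preloaded node */

	return ret ? ret : id;
}

static void demo_put_index(int id)
{
	spin_lock(&demo_ida_lock);
	ida_remove(&demo_ida, id);
	spin_unlock(&demo_ida_lock);
}
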
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 4c33d7690f2f..250f223aaa80 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -1,3 +1,24 @@
#
# MMC core configuration
#
+config PWRSEQ_EMMC
+ tristate "HW reset support for eMMC"
+ default y
+ depends on OF
+ help
+ This selects Hardware reset support aka pwrseq-emmc for eMMC
+ devices. By default this option is set to y.
+
+ This driver can also be built as a module. If so, the module
+ will be called pwrseq_emmc.
+
+config PWRSEQ_SIMPLE
+ tristate "Simple HW reset support for MMC"
+ default y
+ depends on OF
+ help
+ This selects simple hardware reset support aka pwrseq-simple for MMC
+ devices. By default this option is set to y.
+
+ This driver can also be built as a module. If so, the module
+ will be called pwrseq_simple.
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 2c25138f28b7..f007151dfdc6 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -8,5 +8,7 @@ mmc_core-y := core.o bus.o host.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o \
quirks.o slot-gpio.o
-mmc_core-$(CONFIG_OF) += pwrseq.o pwrseq_simple.o pwrseq_emmc.o
+mmc_core-$(CONFIG_OF) += pwrseq.o
+obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o
+obj-$(CONFIG_PWRSEQ_EMMC) += pwrseq_emmc.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 41b1e761965f..99275e40bf2f 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -36,6 +36,9 @@
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/mmc.h>
+
#include "core.h"
#include "bus.h"
#include "host.h"
@@ -140,6 +143,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
cmd->retries = 0;
}
+ trace_mmc_request_done(host, mrq);
+
if (err && cmd->retries && !mmc_card_removed(host->card)) {
/*
* Request starter must handle retries - see
@@ -215,6 +220,8 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
}
}
+ trace_mmc_request_start(host, mrq);
+
host->ops->request(host, mrq);
}
@@ -2449,8 +2456,9 @@ int mmc_hw_reset(struct mmc_host *host)
ret = host->bus_ops->reset(host);
mmc_bus_put(host);
- if (ret != -EOPNOTSUPP)
- pr_warn("%s: tried to reset card\n", mmc_hostname(host));
+ if (ret)
+ pr_warn("%s: tried to reset card, got error %d\n",
+ mmc_hostname(host), ret);
return ret;
}
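
The CREATE_TRACE_POINTS hunk above follows the usual tracepoint rule: exactly one compilation unit defines the tracepoint bodies by setting CREATE_TRACE_POINTS before including the trace header, and every other user includes the header alone. Schematically (the header path is the one this series adds):

/* In exactly one .c file -- here drivers/mmc/core/core.c: */
#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>

/*
 * Every other file that wants to emit the events just includes the
 * header and calls the generated hooks, e.g.:
 *
 *	#include <trace/events/mmc.h>
 *	trace_mmc_request_start(host, mrq);
 */
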
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 6e4c55a4aab5..e0a3ee16c0d3 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -33,14 +33,14 @@
#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
-static DEFINE_IDR(mmc_host_idr);
+static DEFINE_IDA(mmc_host_ida);
static DEFINE_SPINLOCK(mmc_host_lock);
static void mmc_host_classdev_release(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
spin_lock(&mmc_host_lock);
- idr_remove(&mmc_host_idr, host->index);
+ ida_remove(&mmc_host_ida, host->index);
spin_unlock(&mmc_host_lock);
kfree(host);
}
@@ -321,14 +321,20 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
/* scanning will be enabled when we're ready */
host->rescan_disable = 1;
- idr_preload(GFP_KERNEL);
+
+again:
+ if (!ida_pre_get(&mmc_host_ida, GFP_KERNEL)) {
+ kfree(host);
+ return NULL;
+ }
+
spin_lock(&mmc_host_lock);
- err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT);
- if (err >= 0)
- host->index = err;
+ err = ida_get_new(&mmc_host_ida, &host->index);
spin_unlock(&mmc_host_lock);
- idr_preload_end();
- if (err < 0) {
+
+ if (err == -EAGAIN) {
+ goto again;
+ } else if (err) {
kfree(host);
return NULL;
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 4dbe3df8024b..b81b08f81325 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -333,6 +333,9 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
}
}
+/* Minimum partition switch timeout in milliseconds */
+#define MMC_MIN_PART_SWITCH_TIME 300
+
/*
* Decode extended CSD.
*/
@@ -397,6 +400,10 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
/* EXT_CSD value is in units of 10ms, but we store in ms */
card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
+ /* Some eMMC set the value too low so set a minimum */
+ if (card->ext_csd.part_time &&
+ card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
+ card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
/* Sleep / awake timeout in 100ns units */
if (sa_shift > 0 && sa_shift <= 0x17)
@@ -1244,10 +1251,11 @@ static int mmc_select_hs200(struct mmc_card *card)
{
struct mmc_host *host = card->host;
bool send_status = true;
- unsigned int old_timing;
+ unsigned int old_timing, old_signal_voltage;
int err = -EINVAL;
u8 val;
+ old_signal_voltage = host->ios.signal_voltage;
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
@@ -1256,7 +1264,7 @@ static int mmc_select_hs200(struct mmc_card *card)
/* If fails try again during next card power cycle */
if (err)
- goto err;
+ return err;
mmc_select_driver_type(card);
@@ -1290,9 +1298,14 @@ static int mmc_select_hs200(struct mmc_card *card)
}
}
err:
- if (err)
+ if (err) {
+		/* fall back to the old signal voltage; if that fails, report an error */
+ if (__mmc_set_signal_voltage(host, old_signal_voltage))
+ err = -EIO;
+
pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
__func__, err);
+ }
return err;
}
@@ -1314,21 +1327,13 @@ static int mmc_select_timing(struct mmc_card *card)
if (err && err != -EBADMSG)
return err;
- if (err) {
- pr_warn("%s: switch to %s failed\n",
- mmc_card_hs(card) ? "high-speed" :
- (mmc_card_hs200(card) ? "hs200" : ""),
- mmc_hostname(card->host));
- err = 0;
- }
-
bus_speed:
/*
* Set the bus speed to the selected bus timing.
* If timing is not selected, backward compatible is the default.
*/
mmc_set_bus_speed(card);
- return err;
+ return 0;
}
/*
@@ -1483,12 +1488,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if (err)
goto free_card;
- /* If doing byte addressing, check if required to do sector
+ /*
+ * If doing byte addressing, check if required to do sector
* addressing. Handle the case of <2GB cards needing sector
* addressing. See section 8.1 JEDEC Standard JED84-A441;
* ocr register has bit 30 set for sector addressing.
*/
- if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
+ if (rocr & BIT(30))
mmc_card_set_blockaddr(card);
/* Erase size depends on CSD and Extended CSD */
@@ -1957,19 +1963,23 @@ static int mmc_reset(struct mmc_host *host)
{
struct mmc_card *card = host->card;
- if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
- return -EOPNOTSUPP;
-
- if (!mmc_can_reset(card))
- return -EOPNOTSUPP;
-
- mmc_set_clock(host, host->f_init);
-
- host->ops->hw_reset(host);
-
- /* Set initial state and call mmc_set_ios */
- mmc_set_initial_state(host);
-
+ /*
+ * In the case of recovery, we can't expect flushing the cache to work
+ * always, but we have a go and ignore errors.
+ */
+ mmc_flush_cache(host->card);
+
+ if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
+ mmc_can_reset(card)) {
+		/* If the card accepts the RST_n signal, send it. */
+ mmc_set_clock(host, host->f_init);
+ host->ops->hw_reset(host);
+ /* Set initial state and call mmc_set_ios */
+ mmc_set_initial_state(host);
+ } else {
+ /* Do a brute force power cycle */
+ mmc_power_cycle(host, card->ocr);
+ }
return mmc_init_card(host, card->ocr, card);
}
diff --git a/drivers/mmc/core/pwrseq.c b/drivers/mmc/core/pwrseq.c
index 4c1d1757dbf9..9386c4771814 100644
--- a/drivers/mmc/core/pwrseq.c
+++ b/drivers/mmc/core/pwrseq.c
@@ -8,88 +8,55 @@
* MMC power sequence management
*/
#include <linux/kernel.h>
-#include <linux/platform_device.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/mmc/host.h>
#include "pwrseq.h"
-struct mmc_pwrseq_match {
- const char *compatible;
- struct mmc_pwrseq *(*alloc)(struct mmc_host *host, struct device *dev);
-};
-
-static struct mmc_pwrseq_match pwrseq_match[] = {
- {
- .compatible = "mmc-pwrseq-simple",
- .alloc = mmc_pwrseq_simple_alloc,
- }, {
- .compatible = "mmc-pwrseq-emmc",
- .alloc = mmc_pwrseq_emmc_alloc,
- },
-};
-
-static struct mmc_pwrseq_match *mmc_pwrseq_find(struct device_node *np)
-{
- struct mmc_pwrseq_match *match = ERR_PTR(-ENODEV);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pwrseq_match); i++) {
- if (of_device_is_compatible(np, pwrseq_match[i].compatible)) {
- match = &pwrseq_match[i];
- break;
- }
- }
-
- return match;
-}
+static DEFINE_MUTEX(pwrseq_list_mutex);
+static LIST_HEAD(pwrseq_list);
int mmc_pwrseq_alloc(struct mmc_host *host)
{
- struct platform_device *pdev;
struct device_node *np;
- struct mmc_pwrseq_match *match;
- struct mmc_pwrseq *pwrseq;
- int ret = 0;
+ struct mmc_pwrseq *p;
np = of_parse_phandle(host->parent->of_node, "mmc-pwrseq", 0);
if (!np)
return 0;
- pdev = of_find_device_by_node(np);
- if (!pdev) {
- ret = -ENODEV;
- goto err;
- }
+ mutex_lock(&pwrseq_list_mutex);
+ list_for_each_entry(p, &pwrseq_list, pwrseq_node) {
+ if (p->dev->of_node == np) {
+ if (!try_module_get(p->owner))
+ dev_err(host->parent,
+ "increasing module refcount failed\n");
+ else
+ host->pwrseq = p;
- match = mmc_pwrseq_find(np);
- if (IS_ERR(match)) {
- ret = PTR_ERR(match);
- goto err;
+ break;
+ }
}
- pwrseq = match->alloc(host, &pdev->dev);
- if (IS_ERR(pwrseq)) {
- ret = PTR_ERR(pwrseq);
- goto err;
- }
+ of_node_put(np);
+ mutex_unlock(&pwrseq_list_mutex);
+
+ if (!host->pwrseq)
+ return -EPROBE_DEFER;
- host->pwrseq = pwrseq;
dev_info(host->parent, "allocated mmc-pwrseq\n");
-err:
- of_node_put(np);
- return ret;
+ return 0;
}
void mmc_pwrseq_pre_power_on(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
- if (pwrseq && pwrseq->ops && pwrseq->ops->pre_power_on)
+ if (pwrseq && pwrseq->ops->pre_power_on)
pwrseq->ops->pre_power_on(host);
}
@@ -97,7 +64,7 @@ void mmc_pwrseq_post_power_on(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
- if (pwrseq && pwrseq->ops && pwrseq->ops->post_power_on)
+ if (pwrseq && pwrseq->ops->post_power_on)
pwrseq->ops->post_power_on(host);
}
@@ -105,7 +72,7 @@ void mmc_pwrseq_power_off(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
- if (pwrseq && pwrseq->ops && pwrseq->ops->power_off)
+ if (pwrseq && pwrseq->ops->power_off)
pwrseq->ops->power_off(host);
}
@@ -113,8 +80,31 @@ void mmc_pwrseq_free(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
- if (pwrseq && pwrseq->ops && pwrseq->ops->free)
- pwrseq->ops->free(host);
+ if (pwrseq) {
+ module_put(pwrseq->owner);
+ host->pwrseq = NULL;
+ }
+}
+
+int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq)
+{
+ if (!pwrseq || !pwrseq->ops || !pwrseq->dev)
+ return -EINVAL;
- host->pwrseq = NULL;
+ mutex_lock(&pwrseq_list_mutex);
+ list_add(&pwrseq->pwrseq_node, &pwrseq_list);
+ mutex_unlock(&pwrseq_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_pwrseq_register);
+
+void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq)
+{
+ if (pwrseq) {
+ mutex_lock(&pwrseq_list_mutex);
+ list_del(&pwrseq->pwrseq_node);
+ mutex_unlock(&pwrseq_list_mutex);
+ }
}
+EXPORT_SYMBOL_GPL(mmc_pwrseq_unregister);
diff --git a/drivers/mmc/core/pwrseq.h b/drivers/mmc/core/pwrseq.h
index 133de0426687..d69e751f148b 100644
--- a/drivers/mmc/core/pwrseq.h
+++ b/drivers/mmc/core/pwrseq.h
@@ -8,32 +8,39 @@
#ifndef _MMC_CORE_PWRSEQ_H
#define _MMC_CORE_PWRSEQ_H
+#include <linux/mmc/host.h>
+
struct mmc_pwrseq_ops {
void (*pre_power_on)(struct mmc_host *host);
void (*post_power_on)(struct mmc_host *host);
void (*power_off)(struct mmc_host *host);
- void (*free)(struct mmc_host *host);
};
struct mmc_pwrseq {
const struct mmc_pwrseq_ops *ops;
+ struct device *dev;
+ struct list_head pwrseq_node;
+ struct module *owner;
};
#ifdef CONFIG_OF
+int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq);
+void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq);
+
int mmc_pwrseq_alloc(struct mmc_host *host);
void mmc_pwrseq_pre_power_on(struct mmc_host *host);
void mmc_pwrseq_post_power_on(struct mmc_host *host);
void mmc_pwrseq_power_off(struct mmc_host *host);
void mmc_pwrseq_free(struct mmc_host *host);
-struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
- struct device *dev);
-struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
- struct device *dev);
-
#else
+static inline int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq)
+{
+ return -ENOSYS;
+}
+static inline void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq) {}
static inline int mmc_pwrseq_alloc(struct mmc_host *host) { return 0; }
static inline void mmc_pwrseq_pre_power_on(struct mmc_host *host) {}
static inline void mmc_pwrseq_post_power_on(struct mmc_host *host) {}
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
index 4a82bc77fe49..adc9c0c614fb 100644
--- a/drivers/mmc/core/pwrseq_emmc.c
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -9,6 +9,9 @@
*/
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -25,6 +28,8 @@ struct mmc_pwrseq_emmc {
struct gpio_desc *reset_gpio;
};
+#define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
+
static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
{
gpiod_set_value(pwrseq->reset_gpio, 1);
@@ -35,27 +40,11 @@ static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
{
- struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_emmc, pwrseq);
+ struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
__mmc_pwrseq_emmc_reset(pwrseq);
}
-static void mmc_pwrseq_emmc_free(struct mmc_host *host)
-{
- struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_emmc, pwrseq);
-
- unregister_restart_handler(&pwrseq->reset_nb);
- gpiod_put(pwrseq->reset_gpio);
- kfree(pwrseq);
-}
-
-static const struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
- .post_power_on = mmc_pwrseq_emmc_reset,
- .free = mmc_pwrseq_emmc_free,
-};
-
static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
unsigned long mode, void *cmd)
{
@@ -66,21 +55,22 @@ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
return NOTIFY_DONE;
}
-struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
- struct device *dev)
+static const struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
+ .post_power_on = mmc_pwrseq_emmc_reset,
+};
+
+static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
{
struct mmc_pwrseq_emmc *pwrseq;
- int ret = 0;
+ struct device *dev = &pdev->dev;
- pwrseq = kzalloc(sizeof(struct mmc_pwrseq_emmc), GFP_KERNEL);
+ pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
if (!pwrseq)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- pwrseq->reset_gpio = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(pwrseq->reset_gpio)) {
- ret = PTR_ERR(pwrseq->reset_gpio);
- goto free;
- }
+ pwrseq->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pwrseq->reset_gpio))
+ return PTR_ERR(pwrseq->reset_gpio);
/*
* register reset handler to ensure emmc reset also from
@@ -92,9 +82,38 @@ struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
register_restart_handler(&pwrseq->reset_nb);
pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
+ pwrseq->pwrseq.dev = dev;
+ pwrseq->pwrseq.owner = THIS_MODULE;
+ platform_set_drvdata(pdev, pwrseq);
+
+ return mmc_pwrseq_register(&pwrseq->pwrseq);
+}
+
+static int mmc_pwrseq_emmc_remove(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_emmc *pwrseq = platform_get_drvdata(pdev);
+
+ unregister_restart_handler(&pwrseq->reset_nb);
+ mmc_pwrseq_unregister(&pwrseq->pwrseq);
- return &pwrseq->pwrseq;
-free:
- kfree(pwrseq);
- return ERR_PTR(ret);
+ return 0;
}
+
+static const struct of_device_id mmc_pwrseq_emmc_of_match[] = {
+ { .compatible = "mmc-pwrseq-emmc",},
+ {/* sentinel */},
+};
+
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_emmc_of_match);
+
+static struct platform_driver mmc_pwrseq_emmc_driver = {
+ .probe = mmc_pwrseq_emmc_probe,
+ .remove = mmc_pwrseq_emmc_remove,
+ .driver = {
+ .name = "pwrseq_emmc",
+ .of_match_table = mmc_pwrseq_emmc_of_match,
+ },
+};
+
+module_platform_driver(mmc_pwrseq_emmc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index bc173e18b71c..450d907c6e6c 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -8,7 +8,10 @@
* Simple MMC power sequence management
*/
#include <linux/clk.h>
+#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -25,6 +28,8 @@ struct mmc_pwrseq_simple {
struct gpio_descs *reset_gpios;
};
+#define to_pwrseq_simple(p) container_of(p, struct mmc_pwrseq_simple, pwrseq)
+
static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
int value)
{
@@ -44,8 +49,7 @@ static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
if (!IS_ERR(pwrseq->ext_clk) && !pwrseq->clk_enabled) {
clk_prepare_enable(pwrseq->ext_clk);
@@ -57,16 +61,14 @@ static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
static void mmc_pwrseq_simple_post_power_on(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
mmc_pwrseq_simple_set_gpios_value(pwrseq, 0);
}
static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
@@ -76,59 +78,64 @@ static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
}
}
-static void mmc_pwrseq_simple_free(struct mmc_host *host)
-{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
-
- if (!IS_ERR(pwrseq->reset_gpios))
- gpiod_put_array(pwrseq->reset_gpios);
-
- if (!IS_ERR(pwrseq->ext_clk))
- clk_put(pwrseq->ext_clk);
-
- kfree(pwrseq);
-}
-
static const struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = {
.pre_power_on = mmc_pwrseq_simple_pre_power_on,
.post_power_on = mmc_pwrseq_simple_post_power_on,
.power_off = mmc_pwrseq_simple_power_off,
- .free = mmc_pwrseq_simple_free,
};
-struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
- struct device *dev)
+static const struct of_device_id mmc_pwrseq_simple_of_match[] = {
+ { .compatible = "mmc-pwrseq-simple",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_simple_of_match);
+
+static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
{
struct mmc_pwrseq_simple *pwrseq;
- int ret = 0;
+ struct device *dev = &pdev->dev;
- pwrseq = kzalloc(sizeof(*pwrseq), GFP_KERNEL);
+ pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
if (!pwrseq)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- pwrseq->ext_clk = clk_get(dev, "ext_clock");
- if (IS_ERR(pwrseq->ext_clk) &&
- PTR_ERR(pwrseq->ext_clk) != -ENOENT) {
- ret = PTR_ERR(pwrseq->ext_clk);
- goto free;
- }
+ pwrseq->ext_clk = devm_clk_get(dev, "ext_clock");
+ if (IS_ERR(pwrseq->ext_clk) && PTR_ERR(pwrseq->ext_clk) != -ENOENT)
+ return PTR_ERR(pwrseq->ext_clk);
- pwrseq->reset_gpios = gpiod_get_array(dev, "reset", GPIOD_OUT_HIGH);
+ pwrseq->reset_gpios = devm_gpiod_get_array(dev, "reset",
+ GPIOD_OUT_HIGH);
if (IS_ERR(pwrseq->reset_gpios) &&
PTR_ERR(pwrseq->reset_gpios) != -ENOENT &&
PTR_ERR(pwrseq->reset_gpios) != -ENOSYS) {
- ret = PTR_ERR(pwrseq->reset_gpios);
- goto clk_put;
+ return PTR_ERR(pwrseq->reset_gpios);
}
+ pwrseq->pwrseq.dev = dev;
pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
+ pwrseq->pwrseq.owner = THIS_MODULE;
+ platform_set_drvdata(pdev, pwrseq);
- return &pwrseq->pwrseq;
-clk_put:
- if (!IS_ERR(pwrseq->ext_clk))
- clk_put(pwrseq->ext_clk);
-free:
- kfree(pwrseq);
- return ERR_PTR(ret);
+ return mmc_pwrseq_register(&pwrseq->pwrseq);
}
+
+static int mmc_pwrseq_simple_remove(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_simple *pwrseq = platform_get_drvdata(pdev);
+
+ mmc_pwrseq_unregister(&pwrseq->pwrseq);
+
+ return 0;
+}
+
+static struct platform_driver mmc_pwrseq_simple_driver = {
+ .probe = mmc_pwrseq_simple_probe,
+ .remove = mmc_pwrseq_simple_remove,
+ .driver = {
+ .name = "pwrseq_simple",
+ .of_match_table = mmc_pwrseq_simple_of_match,
+ },
+};
+
+module_platform_driver(mmc_pwrseq_simple_driver);
+MODULE_LICENSE("GPL v2");
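
With the two conversions above, a power-sequence provider is just a platform driver that fills in struct mmc_pwrseq (ops, dev, owner) and calls mmc_pwrseq_register(); mmc_pwrseq_alloc() then matches it against the host's "mmc-pwrseq" phandle and takes a module reference via try_module_get(). A condensed provider sketch, with the demo_* names and the empty ops table as placeholders:

#include <linux/module.h>
#include <linux/platform_device.h>

#include "pwrseq.h"

struct demo_pwrseq {
	struct mmc_pwrseq pwrseq;
};

static const struct mmc_pwrseq_ops demo_pwrseq_ops = {
	/* .pre_power_on / .post_power_on / .power_off as required */
};

static int demo_pwrseq_probe(struct platform_device *pdev)
{
	struct demo_pwrseq *p;

	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->pwrseq.ops = &demo_pwrseq_ops;
	p->pwrseq.dev = &pdev->dev;
	p->pwrseq.owner = THIS_MODULE;
	platform_set_drvdata(pdev, p);

	return mmc_pwrseq_register(&p->pwrseq);
}

static int demo_pwrseq_remove(struct platform_device *pdev)
{
	struct demo_pwrseq *p = platform_get_drvdata(pdev);

	mmc_pwrseq_unregister(&p->pwrseq);
	return 0;
}
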
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 6f6fc527a263..dcb3dee59fa5 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -177,8 +177,13 @@ static int cistpl_funce_func(struct mmc_card *card, struct sdio_func *func,
vsn = func->card->cccr.sdio_vsn;
min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42;
- if (size < min_size)
+ if (size == 28 && vsn == SDIO_SDIO_REV_1_10) {
+ pr_warn("%s: card has broken SDIO 1.1 CIS, forcing SDIO 1.0\n",
+ mmc_hostname(card->host));
+ vsn = SDIO_SDIO_REV_1_00;
+ } else if (size < min_size) {
return -EINVAL;
+ }
/* TPLFE_MAX_BLK_SIZE */
func->max_blksize = buf[12] | (buf[13] << 8);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index e657af0e95fa..0aa484c10c0a 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -677,9 +677,9 @@ config MMC_SH_MMCIF
depends on HAS_DMA
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
help
- This selects the MMC Host Interface controller (MMCIF).
+ This selects the MMC Host Interface controller (MMCIF) found in various
+ Renesas SoCs for SH and ARM architectures.
- This driver supports MMCIF in sh7724/sh7757/sh7372.
config MMC_JZ4740
tristate "JZ4740 SD/Multimedia Card Interface support"
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 9268c41a8561..0ad8ef565b74 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1410,8 +1410,6 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
WARN_ON(slot->mrq);
dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
- pm_runtime_get_sync(&host->pdev->dev);
-
/*
* We may "know" the card is gone even though there's still an
* electrical connection. If so, we really need to communicate
@@ -1442,8 +1440,6 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct atmel_mci *host = slot->host;
unsigned int i;
- pm_runtime_get_sync(&host->pdev->dev);
-
slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
@@ -1576,8 +1572,6 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
break;
}
- pm_runtime_mark_last_busy(&host->pdev->dev);
- pm_runtime_put_autosuspend(&host->pdev->dev);
}
static int atmci_get_ro(struct mmc_host *mmc)
@@ -1669,9 +1663,6 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
spin_unlock(&host->lock);
mmc_request_done(prev_mmc, mrq);
spin_lock(&host->lock);
-
- pm_runtime_mark_last_busy(&host->pdev->dev);
- pm_runtime_put_autosuspend(&host->pdev->dev);
}
static void atmci_command_complete(struct atmel_mci *host,
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 693144e7427b..a56373c75983 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -32,12 +32,10 @@
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
-#include <linux/edma.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/platform_data/edma.h>
#include <linux/platform_data/mmc-davinci.h>
/*
@@ -202,7 +200,6 @@ struct mmc_davinci_host {
u32 buffer_bytes_left;
u32 bytes_left;
- u32 rxdma, txdma;
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
bool use_dma;
@@ -513,35 +510,20 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
- int r;
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- host->dma_tx =
- dma_request_slave_channel_compat(mask, edma_filter_fn,
- &host->txdma, mmc_dev(host->mmc), "tx");
- if (!host->dma_tx) {
+ host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
+ if (IS_ERR(host->dma_tx)) {
dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n");
- return -ENODEV;
+ return PTR_ERR(host->dma_tx);
}
- host->dma_rx =
- dma_request_slave_channel_compat(mask, edma_filter_fn,
- &host->rxdma, mmc_dev(host->mmc), "rx");
- if (!host->dma_rx) {
+ host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
+ if (IS_ERR(host->dma_rx)) {
dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n");
- r = -ENODEV;
- goto free_master_write;
+ dma_release_channel(host->dma_tx);
+ return PTR_ERR(host->dma_rx);
}
return 0;
-
-free_master_write:
- dma_release_channel(host->dma_tx);
-
- return r;
}
/*----------------------------------------------------------------------*/
@@ -1223,7 +1205,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
struct mmc_davinci_host *host = NULL;
struct mmc_host *mmc = NULL;
struct resource *r, *mem = NULL;
- int ret = 0, irq = 0;
+ int ret, irq;
size_t mem_size;
const struct platform_device_id *id_entry;
@@ -1233,50 +1215,40 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
return -ENOENT;
}
- ret = -ENODEV;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!r || irq == NO_IRQ)
- goto out;
+ return -ENODEV;
- ret = -EBUSY;
mem_size = resource_size(r);
- mem = request_mem_region(r->start, mem_size, pdev->name);
+ mem = devm_request_mem_region(&pdev->dev, r->start, mem_size,
+ pdev->name);
if (!mem)
- goto out;
+ return -EBUSY;
- ret = -ENOMEM;
mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
if (!mmc)
- goto out;
+ return -ENOMEM;
host = mmc_priv(mmc);
host->mmc = mmc; /* Important */
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!r)
- dev_warn(&pdev->dev, "RX DMA resource not specified\n");
- else
- host->rxdma = r->start;
-
- r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!r)
- dev_warn(&pdev->dev, "TX DMA resource not specified\n");
- else
- host->txdma = r->start;
-
host->mem_res = mem;
- host->base = ioremap(mem->start, mem_size);
- if (!host->base)
- goto out;
+ host->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
+ if (!host->base) {
+ ret = -ENOMEM;
+ goto ioremap_fail;
+ }
- ret = -ENXIO;
- host->clk = clk_get(&pdev->dev, "MMCSDCLK");
+ host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
- goto out;
+ goto clk_get_fail;
}
- clk_enable(host->clk);
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ goto clk_prepare_enable_fail;
+
host->mmc_input_clk = clk_get_rate(host->clk);
init_mmcsd_host(host);
@@ -1291,8 +1263,13 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
host->mmc_irq = irq;
host->sdio_irq = platform_get_irq(pdev, 1);
- if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
- host->use_dma = 0;
+ if (host->use_dma) {
+ ret = davinci_acquire_dma_channels(host);
+ if (ret == -EPROBE_DEFER)
+ goto dma_probe_defer;
+ else if (ret)
+ host->use_dma = 0;
+ }
/* REVISIT: someday, support IRQ-driven card detection. */
mmc->caps |= MMC_CAP_NEEDS_POLL;
@@ -1346,15 +1323,17 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
ret = mmc_add_host(mmc);
if (ret < 0)
- goto out;
+ goto mmc_add_host_fail;
- ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host);
+ ret = devm_request_irq(&pdev->dev, irq, mmc_davinci_irq, 0,
+ mmc_hostname(mmc), host);
if (ret)
- goto out;
+ goto request_irq_fail;
if (host->sdio_irq >= 0) {
- ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
- mmc_hostname(mmc), host);
+ ret = devm_request_irq(&pdev->dev, host->sdio_irq,
+ mmc_davinci_sdio_irq, 0,
+ mmc_hostname(mmc), host);
if (!ret)
mmc->caps |= MMC_CAP_SDIO_IRQ;
}
@@ -1367,28 +1346,18 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
return 0;
-out:
+request_irq_fail:
+ mmc_remove_host(mmc);
+mmc_add_host_fail:
mmc_davinci_cpufreq_deregister(host);
cpu_freq_fail:
- if (host) {
- davinci_release_dma_channels(host);
-
- if (host->clk) {
- clk_disable(host->clk);
- clk_put(host->clk);
- }
-
- if (host->base)
- iounmap(host->base);
- }
-
- if (mmc)
- mmc_free_host(mmc);
-
- if (mem)
- release_resource(mem);
-
- dev_dbg(&pdev->dev, "probe err %d\n", ret);
+ davinci_release_dma_channels(host);
+dma_probe_defer:
+ clk_disable_unprepare(host->clk);
+clk_prepare_enable_fail:
+clk_get_fail:
+ioremap_fail:
+ mmc_free_host(mmc);
return ret;
}
@@ -1397,25 +1366,11 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
{
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
- if (host) {
- mmc_davinci_cpufreq_deregister(host);
-
- mmc_remove_host(host->mmc);
- free_irq(host->mmc_irq, host);
- if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
- free_irq(host->sdio_irq, host);
-
- davinci_release_dma_channels(host);
-
- clk_disable(host->clk);
- clk_put(host->clk);
-
- iounmap(host->base);
-
- release_resource(host->mem_res);
-
- mmc_free_host(host->mmc);
- }
+ mmc_remove_host(host->mmc);
+ mmc_davinci_cpufreq_deregister(host);
+ davinci_release_dma_channels(host);
+ clk_disable_unprepare(host->clk);
+ mmc_free_host(host->mmc);
return 0;
}
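
The davinci conversion above swaps the platform-data/edma_filter_fn lookup for dma_request_chan(), which resolves the named channel from DT (or ACPI) and returns an ERR_PTR on failure, so -EPROBE_DEFER can be propagated instead of silently falling back to PIO. A minimal sketch with a hypothetical demo_ helper:

#include <linux/dmaengine.h>
#include <linux/err.h>

static int demo_acquire_dma(struct device *dev, struct dma_chan **tx,
			    struct dma_chan **rx)
{
	*tx = dma_request_chan(dev, "tx");
	if (IS_ERR(*tx))
		return PTR_ERR(*tx);	/* may be -EPROBE_DEFER */

	*rx = dma_request_chan(dev, "rx");
	if (IS_ERR(*rx)) {
		dma_release_channel(*tx);
		return PTR_ERR(*rx);
	}

	return 0;
}
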
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 8790f2afc057..7e3a3247b852 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -91,10 +91,14 @@ static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host)
return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL)) + 1;
}
-static int dw_mci_exynos_priv_init(struct dw_mci *host)
+static void dw_mci_exynos_config_smu(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
+ /*
+	 * If the Exynos controller provides security management (SMU),
+	 * configure it for non-encryption mode at this time.
+ */
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
mci_writel(host, MPSBEGIN0, 0);
@@ -104,6 +108,13 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host)
SDMMC_MPSCTRL_VALID |
SDMMC_MPSCTRL_NON_SECURE_WRITE_BIT);
}
+}
+
+static int dw_mci_exynos_priv_init(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ dw_mci_exynos_config_smu(host);
if (priv->ctrl_type >= DW_MCI_TYPE_EXYNOS5420) {
priv->saved_strobe_ctrl = mci_readl(host, HS400_DLINE_CTRL);
@@ -115,13 +126,6 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host)
DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl);
}
- return 0;
-}
-
-static int dw_mci_exynos_setup_clock(struct dw_mci *host)
-{
- struct dw_mci_exynos_priv_data *priv = host->priv;
-
host->bus_hz /= (priv->ciu_div + 1);
return 0;
@@ -169,7 +173,7 @@ static int dw_mci_exynos_resume(struct device *dev)
{
struct dw_mci *host = dev_get_drvdata(dev);
- dw_mci_exynos_priv_init(host);
+ dw_mci_exynos_config_smu(host);
return dw_mci_resume(host);
}
@@ -489,7 +493,6 @@ static unsigned long exynos_dwmmc_caps[4] = {
static const struct dw_mci_drv_data exynos_drv_data = {
.caps = exynos_dwmmc_caps,
.init = dw_mci_exynos_priv_init,
- .setup_clock = dw_mci_exynos_setup_clock,
.set_ios = dw_mci_exynos_set_ios,
.parse_dt = dw_mci_exynos_parse_dt,
.execute_tuning = dw_mci_exynos_execute_tuning,
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 84e50f3a64b6..8c20b81cafd8 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -26,13 +26,6 @@ struct dw_mci_rockchip_priv_data {
int default_sample_phase;
};
-static int dw_mci_rk3288_setup_clock(struct dw_mci *host)
-{
- host->bus_hz /= RK3288_CLKGEN_DIV;
-
- return 0;
-}
-
static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
struct dw_mci_rockchip_priv_data *priv = host->priv;
@@ -231,18 +224,30 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
/* It needs this quirk on all Rockchip SoCs */
host->pdata->quirks |= DW_MCI_QUIRK_BROKEN_DTO;
+ if (of_device_is_compatible(host->dev->of_node,
+ "rockchip,rk3288-dw-mshc"))
+ host->bus_hz /= RK3288_CLKGEN_DIV;
+
return 0;
}
+/* Common capabilities of RK3288 SoC */
+static unsigned long dw_mci_rk3288_dwmmc_caps[4] = {
+ MMC_CAP_ERASE,
+ MMC_CAP_ERASE,
+ MMC_CAP_ERASE,
+ MMC_CAP_ERASE,
+};
+
static const struct dw_mci_drv_data rk2928_drv_data = {
.init = dw_mci_rockchip_init,
};
static const struct dw_mci_drv_data rk3288_drv_data = {
+ .caps = dw_mci_rk3288_dwmmc_caps,
.set_ios = dw_mci_rk3288_set_ios,
.execute_tuning = dw_mci_rk3288_execute_tuning,
.parse_dt = dw_mci_rk3288_parse_dt,
- .setup_clock = dw_mci_rk3288_setup_clock,
.init = dw_mci_rockchip_init,
};
@@ -269,33 +274,13 @@ static int dw_mci_rockchip_probe(struct platform_device *pdev)
return dw_mci_pltfm_register(pdev, drv_data);
}
-#ifdef CONFIG_PM_SLEEP
-static int dw_mci_rockchip_suspend(struct device *dev)
-{
- struct dw_mci *host = dev_get_drvdata(dev);
-
- return dw_mci_suspend(host);
-}
-
-static int dw_mci_rockchip_resume(struct device *dev)
-{
- struct dw_mci *host = dev_get_drvdata(dev);
-
- return dw_mci_resume(host);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(dw_mci_rockchip_pmops,
- dw_mci_rockchip_suspend,
- dw_mci_rockchip_resume);
-
static struct platform_driver dw_mci_rockchip_pltfm_driver = {
.probe = dw_mci_rockchip_probe,
.remove = dw_mci_pltfm_remove,
.driver = {
.name = "dwmmc_rockchip",
.of_match_table = dw_mci_rockchip_match,
- .pm = &dw_mci_rockchip_pmops,
+ .pm = &dw_mci_pltfm_pmops,
},
};
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 242f9a0769bd..9dd1bd358434 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -680,7 +680,7 @@ static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
- dmaengine_terminate_all(host->dms->ch);
+ dmaengine_terminate_async(host->dms->ch);
}
static int dw_mci_edmac_start_dma(struct dw_mci *host,
@@ -3003,15 +3003,6 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- if (drv_data && drv_data->setup_clock) {
- ret = drv_data->setup_clock(host);
- if (ret) {
- dev_err(host->dev,
- "implementation specific clock setup failed\n");
- goto err_clk_ciu;
- }
- }
-
setup_timer(&host->cmd11_timer,
dw_mci_cmd11_timer, (unsigned long)host);
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 68d5da2dfd19..1e8d8380f9cf 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -277,7 +277,6 @@ struct dw_mci_slot {
* dw_mci driver data - dw-mshc implementation specific driver data.
* @caps: mmc subsystem specified capabilities of the controller(s).
* @init: early implementation specific initialization.
- * @setup_clock: implementation specific clock configuration.
* @set_ios: handle bus specific extensions.
* @parse_dt: parse implementation specific device tree properties.
* @execute_tuning: implementation specific tuning procedure.
@@ -289,7 +288,6 @@ struct dw_mci_slot {
struct dw_mci_drv_data {
unsigned long *caps;
int (*init)(struct dw_mci *host);
- int (*setup_clock)(struct dw_mci *host);
void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
int (*parse_dt)(struct dw_mci *host);
int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode);
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 2e6c96845c9a..df990bb8c873 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -226,16 +226,11 @@ static int mmci_card_busy(struct mmc_host *mmc)
unsigned long flags;
int busy = 0;
- pm_runtime_get_sync(mmc_dev(mmc));
-
spin_lock_irqsave(&host->lock, flags);
if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
busy = 1;
spin_unlock_irqrestore(&host->lock, flags);
- pm_runtime_mark_last_busy(mmc_dev(mmc));
- pm_runtime_put_autosuspend(mmc_dev(mmc));
-
return busy;
}
@@ -381,9 +376,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
host->cmd = NULL;
mmc_request_done(host->mmc, mrq);
-
- pm_runtime_mark_last_busy(mmc_dev(host->mmc));
- pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
@@ -1290,8 +1282,6 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
return;
}
- pm_runtime_get_sync(mmc_dev(mmc));
-
spin_lock_irqsave(&host->lock, flags);
host->mrq = mrq;
@@ -1318,8 +1308,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
unsigned long flags;
int ret;
- pm_runtime_get_sync(mmc_dev(mmc));
-
if (host->plat->ios_handler &&
host->plat->ios_handler(mmc_dev(mmc), ios))
dev_err(mmc_dev(mmc), "platform ios_handler failed\n");
@@ -1414,9 +1402,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
mmci_reg_delay(host);
spin_unlock_irqrestore(&host->lock, flags);
-
- pm_runtime_mark_last_busy(mmc_dev(mmc));
- pm_runtime_put_autosuspend(mmc_dev(mmc));
}
static int mmci_get_cd(struct mmc_host *mmc)
@@ -1440,8 +1425,6 @@ static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
if (!IS_ERR(mmc->supply.vqmmc)) {
- pm_runtime_get_sync(mmc_dev(mmc));
-
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
ret = regulator_set_voltage(mmc->supply.vqmmc,
@@ -1459,9 +1442,6 @@ static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
if (ret)
dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
-
- pm_runtime_mark_last_busy(mmc_dev(mmc));
- pm_runtime_put_autosuspend(mmc_dev(mmc));
}
return ret;
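Dropping these pm_runtime_get_sync()/pm_runtime_put_autosuspend() pairs assumes the MMC core now keeps the host controller runtime-resumed for as long as the host is claimed. Roughly, as a sketch of the core-side pairing this relies on (not code from this series):

        mmc_claim_host(mmc);    /* core side: pm_runtime_get_sync(mmc->parent) */
        /* ... request, set_ios, get_cd, voltage switch ... */
        mmc_release_host(mmc);  /* core side: pm_runtime_mark_last_busy() +
                                 * pm_runtime_put_autosuspend(mmc->parent) */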
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index b17f30da97da..5642f71f8bf0 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -736,9 +736,6 @@ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
if (mrq->data)
msdc_unprepare_data(host, mrq);
mmc_request_done(host->mmc, mrq);
-
- pm_runtime_mark_last_busy(host->dev);
- pm_runtime_put_autosuspend(host->dev);
}
/* returns true if command is fully handled; returns false otherwise */
@@ -886,8 +883,6 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
WARN_ON(host->mrq);
host->mrq = mrq;
- pm_runtime_get_sync(host->dev);
-
if (mrq->data)
msdc_prepare_data(host, mrq);
@@ -1201,8 +1196,6 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct msdc_host *host = mmc_priv(mmc);
int ret;
- pm_runtime_get_sync(host->dev);
-
msdc_set_buswidth(host, ios->bus_width);
/* Suspend/Resume will do power off/on */
@@ -1214,7 +1207,7 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
ios->vdd);
if (ret) {
dev_err(host->dev, "Failed to set vmmc power!\n");
- goto end;
+ return;
}
}
break;
@@ -1242,10 +1235,6 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->mclk != ios->clock || host->timing != ios->timing)
msdc_set_mclk(host, ios->timing, ios->clock);
-
-end:
- pm_runtime_mark_last_busy(host->dev);
- pm_runtime_put_autosuspend(host->dev);
}
static u32 test_delay_bit(u32 delay, u32 bit)
@@ -1408,19 +1397,15 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
struct msdc_host *host = mmc_priv(mmc);
int ret;
- pm_runtime_get_sync(host->dev);
ret = msdc_tune_response(mmc, opcode);
if (ret == -EIO) {
dev_err(host->dev, "Tune response fail!\n");
- goto out;
+ return ret;
}
ret = msdc_tune_data(mmc, opcode);
if (ret == -EIO)
dev_err(host->dev, "Tune data fail!\n");
-out:
- pm_runtime_mark_last_busy(host->dev);
- pm_runtime_put_autosuspend(host->dev);
return ret;
}
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index b9958a123594..f23d65eb070d 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -23,7 +23,6 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/of.h>
-#include <linux/omap-dma.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
@@ -1321,8 +1320,6 @@ static int mmc_omap_probe(struct platform_device *pdev)
struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
struct mmc_omap_host *host = NULL;
struct resource *res;
- dma_cap_mask_t mask;
- unsigned sig = 0;
int i, ret = 0;
int irq;
@@ -1382,29 +1379,34 @@ static int mmc_omap_probe(struct platform_device *pdev)
goto err_free_iclk;
}
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
host->dma_tx_burst = -1;
host->dma_rx_burst = -1;
- res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
- if (res)
- sig = res->start;
- host->dma_tx = dma_request_slave_channel_compat(mask,
- omap_dma_filter_fn, &sig, &pdev->dev, "tx");
- if (!host->dma_tx)
- dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
- sig);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
- if (res)
- sig = res->start;
- host->dma_rx = dma_request_slave_channel_compat(mask,
- omap_dma_filter_fn, &sig, &pdev->dev, "rx");
- if (!host->dma_rx)
- dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
- sig);
+ host->dma_tx = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(host->dma_tx)) {
+ ret = PTR_ERR(host->dma_tx);
+ if (ret == -EPROBE_DEFER) {
+ clk_put(host->fclk);
+ goto err_free_iclk;
+ }
+
+ host->dma_tx = NULL;
+ dev_warn(host->dev, "TX DMA channel request failed\n");
+ }
+
+ host->dma_rx = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(host->dma_rx)) {
+ ret = PTR_ERR(host->dma_rx);
+ if (ret == -EPROBE_DEFER) {
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+ clk_put(host->fclk);
+ goto err_free_iclk;
+ }
+
+ host->dma_rx = NULL;
+ dev_warn(host->dev, "RX DMA channel request failed\n");
+ }
ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
if (ret)
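dma_request_chan() returns an ERR_PTR() rather than NULL, which is what lets the probe above treat -EPROBE_DEFER as fatal-for-now while any other failure merely falls back to PIO. The same idiom, distilled into a hypothetical helper (names invented for illustration):

        static struct dma_chan *request_optional_chan(struct device *dev,
                                                      const char *name)
        {
                struct dma_chan *ch = dma_request_chan(dev, name);

                if (!IS_ERR(ch))
                        return ch;
                if (PTR_ERR(ch) == -EPROBE_DEFER)
                        return ch;      /* propagate: retry the probe later */

                dev_warn(dev, "%s DMA channel unavailable, using PIO\n", name);
                return NULL;            /* optional resource, keep going */
        }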
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index f9ac3bb5d617..24ebc9a8de89 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -32,7 +32,6 @@
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
-#include <linux/omap-dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>
@@ -351,15 +350,14 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
return 0;
}
-static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
+static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
+ int vdd)
{
- struct omap_hsmmc_host *host =
- platform_get_drvdata(to_platform_device(dev));
struct mmc_host *mmc = host->mmc;
int ret = 0;
if (mmc_pdata(host)->set_power)
- return mmc_pdata(host)->set_power(dev, power_on, vdd);
+ return mmc_pdata(host)->set_power(host->dev, power_on, vdd);
/*
* If we don't see a Vcc regulator, assume it's a fixed
@@ -369,7 +367,7 @@ static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
return 0;
if (mmc_pdata(host)->before_set_reg)
- mmc_pdata(host)->before_set_reg(dev, power_on, vdd);
+ mmc_pdata(host)->before_set_reg(host->dev, power_on, vdd);
ret = omap_hsmmc_set_pbias(host, false, 0);
if (ret)
@@ -403,7 +401,7 @@ static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
}
if (mmc_pdata(host)->after_set_reg)
- mmc_pdata(host)->after_set_reg(dev, power_on, vdd);
+ mmc_pdata(host)->after_set_reg(host->dev, power_on, vdd);
return 0;
@@ -968,8 +966,6 @@ static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_req
return;
host->mrq = NULL;
mmc_request_done(host->mmc, mrq);
- pm_runtime_mark_last_busy(host->dev);
- pm_runtime_put_autosuspend(host->dev);
}
/*
@@ -1250,17 +1246,15 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
int ret;
/* Disable the clocks */
- pm_runtime_put_sync(host->dev);
if (host->dbclk)
clk_disable_unprepare(host->dbclk);
/* Turn the power off */
- ret = omap_hsmmc_set_power(host->dev, 0, 0);
+ ret = omap_hsmmc_set_power(host, 0, 0);
/* Turn the power ON with given VDD 1.8 or 3.0v */
if (!ret)
- ret = omap_hsmmc_set_power(host->dev, 1, vdd);
- pm_runtime_get_sync(host->dev);
+ ret = omap_hsmmc_set_power(host, 1, vdd);
if (host->dbclk)
clk_prepare_enable(host->dbclk);
@@ -1368,8 +1362,6 @@ static void omap_hsmmc_dma_callback(void *param)
host->mrq = NULL;
mmc_request_done(host->mmc, mrq);
- pm_runtime_mark_last_busy(host->dev);
- pm_runtime_put_autosuspend(host->dev);
}
}
@@ -1602,7 +1594,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
BUG_ON(host->req_in_progress);
BUG_ON(host->dma_ch != -1);
- pm_runtime_get_sync(host->dev);
if (host->protect_card) {
if (host->reqs_blocked < 3) {
/*
@@ -1619,8 +1610,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
req->data->error = -EBADF;
req->cmd->retries = 0;
mmc_request_done(mmc, req);
- pm_runtime_mark_last_busy(host->dev);
- pm_runtime_put_autosuspend(host->dev);
return;
} else if (host->reqs_blocked)
host->reqs_blocked = 0;
@@ -1634,8 +1623,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
req->data->error = err;
host->mrq = NULL;
mmc_request_done(mmc, req);
- pm_runtime_mark_last_busy(host->dev);
- pm_runtime_put_autosuspend(host->dev);
return;
}
if (req->sbc && !(host->flags & AUTO_CMD23)) {
@@ -1653,15 +1640,13 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct omap_hsmmc_host *host = mmc_priv(mmc);
int do_send_init_stream = 0;
- pm_runtime_get_sync(host->dev);
-
if (ios->power_mode != host->power_mode) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
- omap_hsmmc_set_power(host->dev, 0, 0);
+ omap_hsmmc_set_power(host, 0, 0);
break;
case MMC_POWER_UP:
- omap_hsmmc_set_power(host->dev, 1, ios->vdd);
+ omap_hsmmc_set_power(host, 1, ios->vdd);
break;
case MMC_POWER_ON:
do_send_init_stream = 1;
@@ -1698,8 +1683,6 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
send_init_stream(host);
omap_hsmmc_set_bus_mode(host);
-
- pm_runtime_put_autosuspend(host->dev);
}
static int omap_hsmmc_get_cd(struct mmc_host *mmc)
@@ -1962,13 +1945,17 @@ MODULE_DEVICE_TABLE(of, omap_mmc_of_match);
static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
{
- struct omap_hsmmc_platform_data *pdata;
+ struct omap_hsmmc_platform_data *pdata, *legacy;
struct device_node *np = dev->of_node;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM); /* out of memory */
+ legacy = dev_get_platdata(dev);
+ if (legacy && legacy->name)
+ pdata->name = legacy->name;
+
if (of_find_property(np, "ti,dual-volt", NULL))
pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;
@@ -2005,8 +1992,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
struct resource *res;
int ret, irq;
const struct of_device_id *match;
- dma_cap_mask_t mask;
- unsigned tx_req, rx_req;
const struct omap_mmc_of_data *data;
void __iomem *base;
@@ -2136,44 +2121,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
omap_hsmmc_conf_bus_power(host);
- if (!pdev->dev.of_node) {
- res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
- if (!res) {
- dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
- ret = -ENXIO;
- goto err_irq;
- }
- tx_req = res->start;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
- if (!res) {
- dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
- ret = -ENXIO;
- goto err_irq;
- }
- rx_req = res->start;
- }
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- host->rx_chan =
- dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
- &rx_req, &pdev->dev, "rx");
-
- if (!host->rx_chan) {
- dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel\n");
- ret = -ENXIO;
+ host->rx_chan = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(host->rx_chan)) {
+ dev_err(mmc_dev(host->mmc), "RX DMA channel request failed\n");
+ ret = PTR_ERR(host->rx_chan);
goto err_irq;
}
- host->tx_chan =
- dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
- &tx_req, &pdev->dev, "tx");
-
- if (!host->tx_chan) {
- dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel\n");
- ret = -ENXIO;
+ host->tx_chan = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(host->tx_chan)) {
+ dev_err(mmc_dev(host->mmc), "TX DMA channel request failed\n");
+ ret = PTR_ERR(host->tx_chan);
goto err_irq;
}
@@ -2231,9 +2189,9 @@ err_slot_name:
mmc_remove_host(mmc);
err_irq:
device_init_wakeup(&pdev->dev, false);
- if (host->tx_chan)
+ if (!IS_ERR_OR_NULL(host->tx_chan))
dma_release_channel(host->tx_chan);
- if (host->rx_chan)
+ if (!IS_ERR_OR_NULL(host->rx_chan))
dma_release_channel(host->rx_chan);
pm_runtime_dont_use_autosuspend(host->dev);
pm_runtime_put_sync(host->dev);
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index bed6a494f52c..b2d70ba6caa7 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -200,8 +200,6 @@ static int bxt_get_cd(struct mmc_host *mmc)
if (!gpio_cd)
return 0;
- pm_runtime_get_sync(mmc->parent);
-
spin_lock_irqsave(&host->lock, flags);
if (host->flags & SDHCI_DEVICE_DEAD)
@@ -211,9 +209,6 @@ static int bxt_get_cd(struct mmc_host *mmc)
out:
spin_unlock_irqrestore(&host->lock, flags);
- pm_runtime_mark_last_busy(mmc->parent);
- pm_runtime_put_autosuspend(mmc->parent);
-
return ret;
}
@@ -267,8 +262,10 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
/* Platform specific code during sd probe slot goes here */
- if (hid && !strcmp(hid, "80865ACA"))
+ if (hid && !strcmp(hid, "80865ACA")) {
host->mmc_host_ops.get_cd = bxt_get_cd;
+ host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
+ }
return 0;
}
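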
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 2e482b13d25e..b6f4c1d41636 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -55,8 +55,32 @@ static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
return freq;
}
+static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ bool ctrl_phy = false;
+
+ if (clock > MMC_HIGH_52_MAX_DTR && (!IS_ERR(sdhci_arasan->phy)))
+ ctrl_phy = true;
+
+ if (ctrl_phy) {
+ spin_unlock_irq(&host->lock);
+ phy_power_off(sdhci_arasan->phy);
+ spin_lock_irq(&host->lock);
+ }
+
+ sdhci_set_clock(host, clock);
+
+ if (ctrl_phy) {
+ spin_unlock_irq(&host->lock);
+ phy_power_on(sdhci_arasan->phy);
+ spin_lock_irq(&host->lock);
+ }
+}
+
static struct sdhci_ops sdhci_arasan_ops = {
- .set_clock = sdhci_set_clock,
+ .set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_arasan_get_timeout_clock,
.set_bus_width = sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 2703aa90d018..25f779e09d8e 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -15,8 +15,10 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
@@ -31,14 +33,60 @@
#define SDMMC_CACR_CAPWREN BIT(0)
#define SDMMC_CACR_KEY (0x46 << 8)
+#define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */
+
struct sdhci_at91_priv {
struct clk *hclock;
struct clk *gck;
struct clk *mainck;
};
+static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+ unsigned long timeout;
+
+ host->mmc->actual_clock = 0;
+
+ /*
+ * There is no requirement to disable the internal clock before
+ * changing the SD clock configuration. Moreover, disabling the
+ * internal clock, changing the configuration and re-enabling the
+ * internal clock causes some bugs. It can prevent the internal clock
+ * stable flag from being set and cause an unexpected switch to the
+ * base clock when using presets.
+ */
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+
+ clk |= SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ /* Wait max 20 ms */
+ timeout = 20;
+ while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+ & SDHCI_CLOCK_INT_STABLE)) {
+ if (timeout == 0) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ return;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+}
+
static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
- .set_clock = sdhci_set_clock,
+ .set_clock = sdhci_at91_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
@@ -46,7 +94,6 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
static const struct sdhci_pltfm_data soc_data_sama5d2 = {
.ops = &sdhci_at91_sama5d2_ops,
- .quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST,
};
static const struct of_device_id sdhci_at91_dt_match[] = {
@@ -119,6 +166,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
unsigned int clk_base, clk_mul;
unsigned int gck_rate, real_gck_rate;
int ret;
+ unsigned int preset_div;
match = of_match_device(sdhci_at91_dt_match, &pdev->dev);
if (!match)
@@ -186,6 +234,28 @@ static int sdhci_at91_probe(struct platform_device *pdev)
clk_mul, real_gck_rate);
}
+ /*
+ * We have to set preset values because it depends on the clk_mul
+ * value. Moreover, SDR104 is supported in a degraded mode since the
+ * maximum sd clock value is 120 MHz instead of 208 MHz. For that
+ * reason, we need to use presets to support SDR104.
+ */
+ preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1;
+ writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
+ host->ioaddr + SDHCI_PRESET_FOR_SDR12);
+ preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+ writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
+ host->ioaddr + SDHCI_PRESET_FOR_SDR25);
+ preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1;
+ writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
+ host->ioaddr + SDHCI_PRESET_FOR_SDR50);
+ preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1;
+ writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
+ host->ioaddr + SDHCI_PRESET_FOR_SDR104);
+ preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+ writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
+ host->ioaddr + SDHCI_PRESET_FOR_DDR50);
+
clk_prepare_enable(priv->mainck);
clk_prepare_enable(priv->gck);
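The preset registers encode the divider for the programmable clock, so the value written depends on the measured gck rate. A worked example with an invented real_gck_rate of 240 MHz, assuming the controller divides by (preset value + 1):

        preset_div = DIV_ROUND_UP(240000000, 50000000) - 1;    /* SDR25: 5 - 1 = 4 */
        /* SDCLK = 240 MHz / (4 + 1) = 48 MHz, just under the 50 MHz target */
        preset_div = DIV_ROUND_UP(240000000, 120000000) - 1;   /* SDR104: 2 - 1 = 1 */
        /* SDCLK = 240 MHz / (1 + 1) = 120 MHz, the degraded SDR104 maximum */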
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 79e19017343e..97d4eebd6bf5 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -340,8 +340,6 @@ static int bxt_get_cd(struct mmc_host *mmc)
if (!gpio_cd)
return 0;
- pm_runtime_get_sync(mmc->parent);
-
spin_lock_irqsave(&host->lock, flags);
if (host->flags & SDHCI_DEVICE_DEAD)
@@ -351,9 +349,6 @@ static int bxt_get_cd(struct mmc_host *mmc)
out:
spin_unlock_irqrestore(&host->lock, flags);
- pm_runtime_mark_last_busy(mmc->parent);
- pm_runtime_put_autosuspend(mmc->parent);
-
return ret;
}
@@ -391,8 +386,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
slot->cd_override_level = true;
if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
- slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD) {
slot->host->mmc_host_ops.get_cd = bxt_get_cd;
+ slot->host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
+ }
return 0;
}
diff --git a/drivers/mmc/host/sdhci-pic32.c b/drivers/mmc/host/sdhci-pic32.c
index 059df707a2fe..72c13b6f05f9 100644
--- a/drivers/mmc/host/sdhci-pic32.c
+++ b/drivers/mmc/host/sdhci-pic32.c
@@ -243,7 +243,6 @@ MODULE_DEVICE_TABLE(of, pic32_sdhci_id_table);
static struct platform_driver pic32_sdhci_driver = {
.driver = {
.name = "pic32-sdhci",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pic32_sdhci_id_table),
},
.probe = pic32_sdhci_probe,
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 072bb27a65cf..64f287a03cd3 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -119,16 +119,22 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
{
struct sdhci_host *host;
struct resource *iomem;
- int ret;
+ void __iomem *ioaddr;
+ int irq, ret;
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem) {
- ret = -ENOMEM;
+ ioaddr = devm_ioremap_resource(&pdev->dev, iomem);
+ if (IS_ERR(ioaddr)) {
+ ret = PTR_ERR(ioaddr);
goto err;
}
- if (resource_size(iomem) < 0x100)
- dev_err(&pdev->dev, "Invalid iomem size!\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ number\n");
+ ret = irq;
+ goto err;
+ }
host = sdhci_alloc_host(&pdev->dev,
sizeof(struct sdhci_pltfm_host) + priv_size);
@@ -138,6 +144,8 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
goto err;
}
+ host->ioaddr = ioaddr;
+ host->irq = irq;
host->hw_name = dev_name(&pdev->dev);
if (pdata && pdata->ops)
host->ops = pdata->ops;
@@ -148,22 +156,6 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
host->quirks2 = pdata->quirks2;
}
- host->irq = platform_get_irq(pdev, 0);
-
- if (!request_mem_region(iomem->start, resource_size(iomem),
- mmc_hostname(host->mmc))) {
- dev_err(&pdev->dev, "cannot request region\n");
- ret = -EBUSY;
- goto err_request;
- }
-
- host->ioaddr = ioremap(iomem->start, resource_size(iomem));
- if (!host->ioaddr) {
- dev_err(&pdev->dev, "failed to remap registers\n");
- ret = -ENOMEM;
- goto err_remap;
- }
-
/*
* Some platforms need to probe the controller to be able to
* determine which caps should be used.
@@ -174,11 +166,6 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
platform_set_drvdata(pdev, host);
return host;
-
-err_remap:
- release_mem_region(iomem->start, resource_size(iomem));
-err_request:
- sdhci_free_host(host);
err:
dev_err(&pdev->dev, "%s failed %d\n", __func__, ret);
return ERR_PTR(ret);
@@ -188,10 +175,7 @@ EXPORT_SYMBOL_GPL(sdhci_pltfm_init);
void sdhci_pltfm_free(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
- struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iounmap(host->ioaddr);
- release_mem_region(iomem->start, resource_size(iomem));
sdhci_free_host(host);
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_free);
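devm_ioremap_resource() folds the old request_mem_region()/ioremap() pair, their error messages and their unwinding into one managed call, which is why the err_request/err_remap labels disappear. The resulting probe-side pattern, as a minimal sketch:

        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);  /* also handles res == NULL */
        if (IS_ERR(base))
                return PTR_ERR(base);                   /* region/mapping auto-released */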
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6bd3d1794966..e010ea4eb6f5 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -38,11 +38,6 @@
#define DBG(f, x...) \
pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
-#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
- defined(CONFIG_MMC_SDHCI_MODULE))
-#define SDHCI_USE_LEDS_CLASS
-#endif
-
#define MAX_TUNING_LOOP 40
static unsigned int debug_quirks = 0;
@@ -53,29 +48,7 @@ static void sdhci_finish_data(struct sdhci_host *);
static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
-static int sdhci_do_get_cd(struct sdhci_host *host);
-
-#ifdef CONFIG_PM
-static int sdhci_runtime_pm_get(struct sdhci_host *host);
-static int sdhci_runtime_pm_put(struct sdhci_host *host);
-static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
-static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
-#else
-static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
-{
- return 0;
-}
-static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
-{
- return 0;
-}
-static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
-{
-}
-static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
-{
-}
-#endif
+static int sdhci_get_cd(struct mmc_host *mmc);
static void sdhci_dumpregs(struct sdhci_host *host)
{
@@ -171,6 +144,22 @@ static void sdhci_disable_card_detection(struct sdhci_host *host)
sdhci_set_card_detection(host, false);
}
+static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+{
+ if (host->bus_on)
+ return;
+ host->bus_on = true;
+ pm_runtime_get_noresume(host->mmc->parent);
+}
+
+static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
+{
+ if (!host->bus_on)
+ return;
+ host->bus_on = false;
+ pm_runtime_put_noidle(host->mmc->parent);
+}
+
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
unsigned long timeout;
@@ -204,7 +193,7 @@ EXPORT_SYMBOL_GPL(sdhci_reset);
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
- if (!sdhci_do_get_cd(host))
+ if (!sdhci_get_cd(host->mmc))
return;
}
@@ -252,7 +241,7 @@ static void sdhci_reinit(struct sdhci_host *host)
sdhci_enable_card_detection(host);
}
-static void sdhci_activate_led(struct sdhci_host *host)
+static void __sdhci_led_activate(struct sdhci_host *host)
{
u8 ctrl;
@@ -261,7 +250,7 @@ static void sdhci_activate_led(struct sdhci_host *host)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
-static void sdhci_deactivate_led(struct sdhci_host *host)
+static void __sdhci_led_deactivate(struct sdhci_host *host)
{
u8 ctrl;
@@ -270,9 +259,9 @@ static void sdhci_deactivate_led(struct sdhci_host *host)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
-#ifdef SDHCI_USE_LEDS_CLASS
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
- enum led_brightness brightness)
+ enum led_brightness brightness)
{
struct sdhci_host *host = container_of(led, struct sdhci_host, led);
unsigned long flags;
@@ -283,12 +272,62 @@ static void sdhci_led_control(struct led_classdev *led,
goto out;
if (brightness == LED_OFF)
- sdhci_deactivate_led(host);
+ __sdhci_led_deactivate(host);
else
- sdhci_activate_led(host);
+ __sdhci_led_activate(host);
out:
spin_unlock_irqrestore(&host->lock, flags);
}
+
+static int sdhci_led_register(struct sdhci_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ snprintf(host->led_name, sizeof(host->led_name),
+ "%s::", mmc_hostname(mmc));
+
+ host->led.name = host->led_name;
+ host->led.brightness = LED_OFF;
+ host->led.default_trigger = mmc_hostname(mmc);
+ host->led.brightness_set = sdhci_led_control;
+
+ return led_classdev_register(mmc_dev(mmc), &host->led);
+}
+
+static void sdhci_led_unregister(struct sdhci_host *host)
+{
+ led_classdev_unregister(&host->led);
+}
+
+static inline void sdhci_led_activate(struct sdhci_host *host)
+{
+}
+
+static inline void sdhci_led_deactivate(struct sdhci_host *host)
+{
+}
+
+#else
+
+static inline int sdhci_led_register(struct sdhci_host *host)
+{
+ return 0;
+}
+
+static inline void sdhci_led_unregister(struct sdhci_host *host)
+{
+}
+
+static inline void sdhci_led_activate(struct sdhci_host *host)
+{
+ __sdhci_led_activate(host);
+}
+
+static inline void sdhci_led_deactivate(struct sdhci_host *host)
+{
+ __sdhci_led_deactivate(host);
+}
+
#endif
/*****************************************************************************\
@@ -1091,23 +1130,14 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host)
return preset;
}
-void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
+ unsigned int *actual_clock)
{
int div = 0; /* Initialized for compiler warning */
int real_div = div, clk_mul = 1;
u16 clk = 0;
- unsigned long timeout;
bool switch_base_clk = false;
- host->mmc->actual_clock = 0;
-
- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
- if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
- mdelay(1);
-
- if (clock == 0)
- return;
-
if (host->version >= SDHCI_SPEC_300) {
if (host->preset_enabled) {
u16 pre_val;
@@ -1184,10 +1214,29 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
clock_set:
if (real_div)
- host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
+ *actual_clock = (host->max_clk * clk_mul) / real_div;
clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
<< SDHCI_DIVIDER_HI_SHIFT;
+
+ return clk;
+}
+EXPORT_SYMBOL_GPL(sdhci_calc_clk);
+
+void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+ unsigned long timeout;
+
+ host->mmc->actual_clock = 0;
+
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+
clk |= SDHCI_CLOCK_INT_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
@@ -1319,8 +1368,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host = mmc_priv(mmc);
- sdhci_runtime_pm_get(host);
-
/* Firstly check card presence */
present = mmc->ops->get_cd(mmc);
@@ -1328,9 +1375,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
WARN_ON(host->mrq != NULL);
-#ifndef SDHCI_USE_LEDS_CLASS
- sdhci_activate_led(host);
-#endif
+ sdhci_led_activate(host);
/*
* Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
@@ -1405,11 +1450,11 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
-static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
+static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
+ struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
u8 ctrl;
- struct mmc_host *mmc = host->mmc;
spin_lock_irqsave(&host->lock, flags);
@@ -1563,18 +1608,10 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
spin_unlock_irqrestore(&host->lock, flags);
}
-static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+static int sdhci_get_cd(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
-
- sdhci_runtime_pm_get(host);
- sdhci_do_set_ios(host, ios);
- sdhci_runtime_pm_put(host);
-}
-
-static int sdhci_do_get_cd(struct sdhci_host *host)
-{
- int gpio_cd = mmc_gpio_get_cd(host->mmc);
+ int gpio_cd = mmc_gpio_get_cd(mmc);
if (host->flags & SDHCI_DEVICE_DEAD)
return 0;
@@ -1598,17 +1635,6 @@ static int sdhci_do_get_cd(struct sdhci_host *host)
return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
-static int sdhci_get_cd(struct mmc_host *mmc)
-{
- struct sdhci_host *host = mmc_priv(mmc);
- int ret;
-
- sdhci_runtime_pm_get(host);
- ret = sdhci_do_get_cd(host);
- sdhci_runtime_pm_put(host);
- return ret;
-}
-
static int sdhci_check_ro(struct sdhci_host *host)
{
unsigned long flags;
@@ -1633,8 +1659,9 @@ static int sdhci_check_ro(struct sdhci_host *host)
#define SAMPLE_COUNT 5
-static int sdhci_do_get_ro(struct sdhci_host *host)
+static int sdhci_get_ro(struct mmc_host *mmc)
{
+ struct sdhci_host *host = mmc_priv(mmc);
int i, ro_count;
if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
@@ -1659,17 +1686,6 @@ static void sdhci_hw_reset(struct mmc_host *mmc)
host->ops->hw_reset(host);
}
-static int sdhci_get_ro(struct mmc_host *mmc)
-{
- struct sdhci_host *host = mmc_priv(mmc);
- int ret;
-
- sdhci_runtime_pm_get(host);
- ret = sdhci_do_get_ro(host);
- sdhci_runtime_pm_put(host);
- return ret;
-}
-
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
if (!(host->flags & SDHCI_DEVICE_DEAD)) {
@@ -1689,8 +1705,6 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
- sdhci_runtime_pm_get(host);
-
spin_lock_irqsave(&host->lock, flags);
if (enable)
host->flags |= SDHCI_SDIO_IRQ_ENABLED;
@@ -1699,14 +1713,12 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
sdhci_enable_sdio_irq_nolock(host, enable);
spin_unlock_irqrestore(&host->lock, flags);
-
- sdhci_runtime_pm_put(host);
}
-static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
- struct mmc_ios *ios)
+static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
{
- struct mmc_host *mmc = host->mmc;
+ struct sdhci_host *host = mmc_priv(mmc);
u16 ctrl;
int ret;
@@ -1794,29 +1806,13 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
}
}
-static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
- struct mmc_ios *ios)
-{
- struct sdhci_host *host = mmc_priv(mmc);
- int err;
-
- if (host->version < SDHCI_SPEC_300)
- return 0;
- sdhci_runtime_pm_get(host);
- err = sdhci_do_start_signal_voltage_switch(host, ios);
- sdhci_runtime_pm_put(host);
- return err;
-}
-
static int sdhci_card_busy(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
u32 present_state;
- sdhci_runtime_pm_get(host);
/* Check whether DAT[3:0] is 0000 */
present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
- sdhci_runtime_pm_put(host);
return !(present_state & SDHCI_DATA_LVL_MASK);
}
@@ -1843,7 +1839,6 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
unsigned int tuning_count = 0;
bool hs400_tuning;
- sdhci_runtime_pm_get(host);
spin_lock_irqsave(&host->lock, flags);
hs400_tuning = host->flags & SDHCI_HS400_TUNING;
@@ -1879,8 +1874,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
break;
case MMC_TIMING_UHS_SDR50:
- if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
- host->flags & SDHCI_SDR104_NEEDS_TUNING)
+ if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
break;
/* FALLTHROUGH */
@@ -1891,7 +1885,6 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (host->ops->platform_execute_tuning) {
spin_unlock_irqrestore(&host->lock, flags);
err = host->ops->platform_execute_tuning(host, opcode);
- sdhci_runtime_pm_put(host);
return err;
}
@@ -2023,8 +2016,6 @@ out:
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
out_unlock:
spin_unlock_irqrestore(&host->lock, flags);
- sdhci_runtime_pm_put(host);
-
return err;
}
@@ -2105,7 +2096,7 @@ static void sdhci_card_event(struct mmc_host *mmc)
if (host->ops->card_event)
host->ops->card_event(host);
- present = sdhci_do_get_cd(host);
+ present = sdhci_get_cd(host->mmc);
spin_lock_irqsave(&host->lock, flags);
@@ -2214,15 +2205,12 @@ static void sdhci_tasklet_finish(unsigned long param)
host->cmd = NULL;
host->data = NULL;
-#ifndef SDHCI_USE_LEDS_CLASS
- sdhci_deactivate_led(host);
-#endif
+ sdhci_led_deactivate(host);
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
- sdhci_runtime_pm_put(host);
}
static void sdhci_timeout_timer(unsigned long data)
@@ -2679,7 +2667,7 @@ int sdhci_resume_host(struct sdhci_host *host)
sdhci_init(host, 0);
host->pwr = 0;
host->clock = 0;
- sdhci_do_set_ios(host, &host->mmc->ios);
+ sdhci_set_ios(host->mmc, &host->mmc->ios);
} else {
sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
mmiowb();
@@ -2703,33 +2691,6 @@ int sdhci_resume_host(struct sdhci_host *host)
EXPORT_SYMBOL_GPL(sdhci_resume_host);
-static int sdhci_runtime_pm_get(struct sdhci_host *host)
-{
- return pm_runtime_get_sync(host->mmc->parent);
-}
-
-static int sdhci_runtime_pm_put(struct sdhci_host *host)
-{
- pm_runtime_mark_last_busy(host->mmc->parent);
- return pm_runtime_put_autosuspend(host->mmc->parent);
-}
-
-static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
-{
- if (host->bus_on)
- return;
- host->bus_on = true;
- pm_runtime_get_noresume(host->mmc->parent);
-}
-
-static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
-{
- if (!host->bus_on)
- return;
- host->bus_on = false;
- pm_runtime_put_noidle(host->mmc->parent);
-}
-
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
unsigned long flags;
@@ -2768,8 +2729,8 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
/* Force clock and power re-program */
host->pwr = 0;
host->clock = 0;
- sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
- sdhci_do_set_ios(host, &host->mmc->ios);
+ sdhci_start_signal_voltage_switch(host->mmc, &host->mmc->ios);
+ sdhci_set_ios(host->mmc, &host->mmc->ios);
if ((host_flags & SDHCI_PV_ENABLED) &&
!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
@@ -3014,7 +2975,8 @@ int sdhci_add_host(struct sdhci_host *host)
if (!host->ops->get_max_clock) {
pr_err("%s: Hardware doesn't specify base clock frequency.\n",
mmc_hostname(mmc));
- return -ENODEV;
+ ret = -ENODEV;
+ goto undma;
}
host->max_clk = host->ops->get_max_clock(host);
}
@@ -3051,7 +3013,7 @@ int sdhci_add_host(struct sdhci_host *host)
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
- if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk)))
+ if (!mmc->f_max || mmc->f_max > max_clk)
mmc->f_max = max_clk;
if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
@@ -3064,7 +3026,8 @@ int sdhci_add_host(struct sdhci_host *host)
} else {
pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
mmc_hostname(mmc));
- return -ENODEV;
+ ret = -ENODEV;
+ goto undma;
}
}
@@ -3118,8 +3081,9 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->caps |= MMC_CAP_NEEDS_POLL;
/* If there are external regulators, get them */
- if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret == -EPROBE_DEFER)
+ goto undma;
/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
if (!IS_ERR(mmc->supply.vqmmc)) {
@@ -3174,10 +3138,6 @@ int sdhci_add_host(struct sdhci_host *host)
if (caps[1] & SDHCI_USE_SDR50_TUNING)
host->flags |= SDHCI_SDR50_NEEDS_TUNING;
- /* Does the host need tuning for SDR104 / HS200? */
- if (mmc->caps2 & MMC_CAP2_HS200)
- host->flags |= SDHCI_SDR104_NEEDS_TUNING;
-
/* Driver Type(s) (A, C, D) supported by the host */
if (caps[1] & SDHCI_DRIVER_TYPE_A)
mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
@@ -3276,7 +3236,8 @@ int sdhci_add_host(struct sdhci_host *host)
if (mmc->ocr_avail == 0) {
pr_err("%s: Hardware doesn't report any support voltages.\n",
mmc_hostname(mmc));
- return -ENODEV;
+ ret = -ENODEV;
+ goto unreg;
}
spin_lock_init(&host->lock);
@@ -3360,25 +3321,18 @@ int sdhci_add_host(struct sdhci_host *host)
sdhci_dumpregs(host);
#endif
-#ifdef SDHCI_USE_LEDS_CLASS
- snprintf(host->led_name, sizeof(host->led_name),
- "%s::", mmc_hostname(mmc));
- host->led.name = host->led_name;
- host->led.brightness = LED_OFF;
- host->led.default_trigger = mmc_hostname(mmc);
- host->led.brightness_set = sdhci_led_control;
-
- ret = led_classdev_register(mmc_dev(mmc), &host->led);
+ ret = sdhci_led_register(host);
if (ret) {
pr_err("%s: Failed to register LED device: %d\n",
mmc_hostname(mmc), ret);
- goto reset;
+ goto unirq;
}
-#endif
mmiowb();
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto unled;
pr_info("%s: SDHCI controller on %s [%s] using %s\n",
mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
@@ -3390,15 +3344,25 @@ int sdhci_add_host(struct sdhci_host *host)
return 0;
-#ifdef SDHCI_USE_LEDS_CLASS
-reset:
+unled:
+ sdhci_led_unregister(host);
+unirq:
sdhci_do_reset(host, SDHCI_RESET_ALL);
sdhci_writel(host, 0, SDHCI_INT_ENABLE);
sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
free_irq(host->irq, host);
-#endif
untasklet:
tasklet_kill(&host->finish_tasklet);
+unreg:
+ if (!IS_ERR(mmc->supply.vqmmc))
+ regulator_disable(mmc->supply.vqmmc);
+undma:
+ if (host->align_buffer)
+ dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
+ host->adma_table_sz, host->align_buffer,
+ host->align_addr);
+ host->adma_table = NULL;
+ host->align_buffer = NULL;
return ret;
}
@@ -3430,9 +3394,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
mmc_remove_host(mmc);
-#ifdef SDHCI_USE_LEDS_CLASS
- led_classdev_unregister(&host->led);
-#endif
+ sdhci_led_unregister(host);
if (!dead)
sdhci_do_reset(host, SDHCI_RESET_ALL);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0f39f4f84d10..609f87ca536b 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -417,11 +417,6 @@ struct sdhci_host {
#define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14)
/* Broken Clock divider zero in controller */
#define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15)
-/*
- * When internal clock is disabled, a delay is needed before modifying the
- * SD clock frequency or enabling back the internal clock.
- */
-#define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST (1<<16)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -433,7 +428,7 @@ struct sdhci_host {
struct mmc_host_ops mmc_host_ops; /* MMC host ops */
u64 dma_mask; /* custom DMA mask */
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
struct led_classdev led; /* LED control */
char led_name[32];
#endif
@@ -450,7 +445,6 @@ struct sdhci_host {
#define SDHCI_AUTO_CMD23 (1<<7) /* Auto CMD23 support */
#define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */
#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
-#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */
#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */
@@ -661,6 +655,8 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED);
}
+u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
+ unsigned int *actual_clock);
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
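With sdhci_calc_clk() exported here, a platform glue driver can reuse the core divider calculation while owning the enable sequence itself; the sdhci-of-at91 hunk earlier in this diff is the in-tree user. A hedged skeleton of such a ->set_clock (details elided, not a drop-in implementation):

        static void foo_set_clock(struct sdhci_host *host, unsigned int clock)
        {
                u16 clk;

                host->mmc->actual_clock = 0;
                sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
                if (clock == 0)
                        return;

                clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
                clk |= SDHCI_CLOCK_INT_EN;
                sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
                /* ... poll SDHCI_CLOCK_INT_STABLE, then set SDHCI_CLOCK_CARD_EN ... */
        }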
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index d9a655f47d41..dd64b8663984 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -248,7 +248,6 @@ struct sh_mmcif_host {
int sg_idx;
int sg_blkidx;
bool power;
- bool card_present;
bool ccs_enable; /* Command Completion Signal support */
bool clk_ctrl2_enable;
struct mutex thread_lock;
@@ -1064,16 +1063,6 @@ static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
host->mmc->f_max, host->mmc->f_min);
}
-static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
-{
- struct mmc_host *mmc = host->mmc;
-
- if (!IS_ERR(mmc->supply.vmmc))
- /* Errors ignored... */
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
- ios->power_mode ? ios->vdd : 0);
-}
-
static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sh_mmcif_host *host = mmc_priv(mmc);
@@ -1091,42 +1080,32 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->state = STATE_IOS;
spin_unlock_irqrestore(&host->lock, flags);
- if (ios->power_mode == MMC_POWER_UP) {
- if (!host->card_present) {
- /* See if we also get DMA */
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+ if (!host->power) {
+ clk_prepare_enable(host->clk);
+ pm_runtime_get_sync(dev);
+ sh_mmcif_sync_reset(host);
sh_mmcif_request_dma(host);
- host->card_present = true;
- }
- sh_mmcif_set_power(host, ios);
- } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
- /* clock stop */
- sh_mmcif_clock_control(host, 0);
- if (ios->power_mode == MMC_POWER_OFF) {
- if (host->card_present) {
- sh_mmcif_release_dma(host);
- host->card_present = false;
- }
+ host->power = true;
}
+ break;
+ case MMC_POWER_OFF:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
if (host->power) {
- pm_runtime_put_sync(dev);
+ sh_mmcif_clock_control(host, 0);
+ sh_mmcif_release_dma(host);
+ pm_runtime_put(dev);
clk_disable_unprepare(host->clk);
host->power = false;
- if (ios->power_mode == MMC_POWER_OFF)
- sh_mmcif_set_power(host, ios);
- }
- host->state = STATE_IDLE;
- return;
- }
-
- if (ios->clock) {
- if (!host->power) {
- clk_prepare_enable(host->clk);
-
- pm_runtime_get_sync(dev);
- host->power = true;
- sh_mmcif_sync_reset(host);
}
+ break;
+ case MMC_POWER_ON:
sh_mmcif_clock_control(host, ios->clock);
+ break;
}
host->timing = ios->timing;
@@ -1519,23 +1498,23 @@ static int sh_mmcif_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- pm_runtime_enable(dev);
- host->power = false;
-
host->clk = devm_clk_get(dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
dev_err(dev, "cannot get clock: %d\n", ret);
- goto err_pm;
+ goto err_host;
}
ret = clk_prepare_enable(host->clk);
if (ret < 0)
- goto err_pm;
+ goto err_host;
sh_mmcif_clk_setup(host);
- ret = pm_runtime_resume(dev);
+ pm_runtime_enable(dev);
+ host->power = false;
+
+ ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_clk;
@@ -1579,12 +1558,13 @@ static int sh_mmcif_probe(struct platform_device *pdev)
sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
clk_get_rate(host->clk) / 1000000UL);
+ pm_runtime_put(dev);
clk_disable_unprepare(host->clk);
return ret;
err_clk:
clk_disable_unprepare(host->clk);
-err_pm:
+ pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
err_host:
mmc_free_host(mmc);
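The reordered probe now brackets its hardware access with a get/put pair so the controller can runtime-suspend again once registration is complete. The balanced idiom, as a generic sketch (hypothetical function name):

        static int foo_probe_pm(struct device *dev)
        {
                int ret;

                pm_runtime_enable(dev);
                ret = pm_runtime_get_sync(dev);
                if (ret < 0) {
                        pm_runtime_put_sync(dev);       /* get_sync raised the count even on error */
                        pm_runtime_disable(dev);
                        return ret;
                }

                /* ... probe the hardware, register the mmc host ... */

                pm_runtime_put(dev);    /* allow autosuspend from now on */
                return 0;
        }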
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 9aa147959276..f750f9494410 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -28,10 +28,12 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
#include <linux/sh_dma.h>
#include <linux/delay.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinctrl-state.h>
+#include <linux/regulator/consumer.h>
#include "tmio_mmc.h"
@@ -48,10 +50,8 @@ struct sh_mobile_sdhi_of_data {
unsigned bus_shift;
};
-static const struct sh_mobile_sdhi_of_data sh_mobile_sdhi_of_cfg[] = {
- {
- .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
- },
+static const struct sh_mobile_sdhi_of_data of_default_cfg = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
};
static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
@@ -62,7 +62,7 @@ static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
- TMIO_MMC_CLK_ACTUAL | TMIO_MMC_FAST_CLK_CHG,
+ TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
.dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
.dma_rx_offset = 0x2000,
@@ -70,17 +70,16 @@ static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
static const struct sh_mobile_sdhi_of_data of_rcar_gen3_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
- TMIO_MMC_CLK_ACTUAL | TMIO_MMC_FAST_CLK_CHG,
- .capabilities = MMC_CAP_SD_HIGHSPEED,
+ TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
.bus_shift = 2,
};
static const struct of_device_id sh_mobile_sdhi_of_match[] = {
{ .compatible = "renesas,sdhi-shmobile" },
- { .compatible = "renesas,sdhi-sh7372" },
- { .compatible = "renesas,sdhi-sh73a0", .data = &sh_mobile_sdhi_of_cfg[0], },
- { .compatible = "renesas,sdhi-r8a73a4", .data = &sh_mobile_sdhi_of_cfg[0], },
- { .compatible = "renesas,sdhi-r8a7740", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
+ { .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
+ { .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
@@ -97,6 +96,8 @@ struct sh_mobile_sdhi {
struct clk *clk;
struct tmio_mmc_data mmc_data;
struct tmio_mmc_dma dma_priv;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default, *pins_uhs;
};
static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
@@ -131,16 +132,28 @@ static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
sd_ctrl_write16(host, EXT_ACC, val);
}
-static int sh_mobile_sdhi_clk_enable(struct platform_device *pdev, unsigned int *f)
+static int sh_mobile_sdhi_clk_enable(struct tmio_mmc_host *host)
{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct mmc_host *mmc = host->mmc;
struct sh_mobile_sdhi *priv = host_to_priv(host);
int ret = clk_prepare_enable(priv->clk);
if (ret < 0)
return ret;
- *f = clk_get_rate(priv->clk);
+ /*
+ * The clock driver may not know what maximum frequency
+ * actually works, so it should be set with the max-frequency
+ * property, which will already have been read into f_max. If it
+ * was missing, assume the current clock rate is the maximum.
+ */
+ if (!mmc->f_max)
+ mmc->f_max = clk_get_rate(priv->clk);
+
+ /*
+ * Minimum frequency is the minimum input clock frequency
+ * divided by our maximum divider.
+ */
+ mmc->f_min = max(clk_round_rate(priv->clk, 1) / 512, 1L);
/* enable 16bit data access on SDBUF as default */
sh_mobile_sdhi_sdbuf_width(host, 16);
@@ -148,19 +161,92 @@ static int sh_mobile_sdhi_clk_enable(struct platform_device *pdev, unsigned int
return 0;
}
-static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev)
+static unsigned int sh_mobile_sdhi_clk_update(struct tmio_mmc_host *host,
+ unsigned int new_clock)
+{
+ struct sh_mobile_sdhi *priv = host_to_priv(host);
+ unsigned int freq, diff, best_freq = 0, diff_min = ~0;
+ int i, ret;
+
+ /* tested only on RCar Gen2+ currently; may work for others */
+ if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
+ return clk_get_rate(priv->clk);
+
+ /*
+ * We want the bus clock to be as close as possible to, but no
+ * greater than, new_clock. As we can divide by 1 << i for
+ * any i in [0, 9] we want the input clock to be as close as
+ * possible, but no greater than, new_clock << i.
+ */
+ for (i = min(9, ilog2(UINT_MAX / new_clock)); i >= 0; i--) {
+ freq = clk_round_rate(priv->clk, new_clock << i);
+ if (freq > (new_clock << i)) {
+ /* Too fast; look for a slightly slower option */
+ freq = clk_round_rate(priv->clk,
+ (new_clock << i) / 4 * 3);
+ if (freq > (new_clock << i))
+ continue;
+ }
+
+ diff = new_clock - (freq >> i);
+ if (diff <= diff_min) {
+ best_freq = freq;
+ diff_min = diff;
+ }
+ }
+
+ ret = clk_set_rate(priv->clk, best_freq);
+
+ return ret == 0 ? best_freq : clk_get_rate(priv->clk);
+}
+
+static void sh_mobile_sdhi_clk_disable(struct tmio_mmc_host *host)
{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
struct sh_mobile_sdhi *priv = host_to_priv(host);
+
clk_disable_unprepare(priv->clk);
}
+static int sh_mobile_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct sh_mobile_sdhi *priv = host_to_priv(host);
+ struct pinctrl_state *pin_state;
+ int ret;
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ pin_state = priv->pins_default;
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ pin_state = priv->pins_uhs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * If anything is missing, assume signal voltage is fixed at
+ * 3.3V and succeed/fail accordingly.
+ */
+ if (IS_ERR(priv->pinctrl) || IS_ERR(pin_state))
+ return ios->signal_voltage ==
+ MMC_SIGNAL_VOLTAGE_330 ? 0 : -EINVAL;
+
+ ret = mmc_regulator_set_vqmmc(host->mmc, ios);
+ if (ret)
+ return ret;
+
+ return pinctrl_select_state(priv->pinctrl, pin_state);
+}
+
static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
{
int timeout = 1000;
- while (--timeout && !(sd_ctrl_read16(host, CTL_STATUS2) & (1 << 13)))
+ while (--timeout && !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS)
+ & TMIO_STAT_SCLKDIVEN))
udelay(1);
if (!timeout) {
@@ -226,7 +312,6 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
struct tmio_mmc_host *host;
struct resource *res;
int irq, ret, i = 0;
- bool multiplexed_isr = true;
struct tmio_mmc_dma *dma_priv;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -247,6 +332,14 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
goto eprobe;
}
+ priv->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (!IS_ERR(priv->pinctrl)) {
+ priv->pins_default = pinctrl_lookup_state(priv->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ priv->pins_uhs = pinctrl_lookup_state(priv->pinctrl,
+ "state_uhs");
+ }
+
host = tmio_mmc_host_alloc(pdev);
if (!host) {
ret = -ENOMEM;
@@ -267,8 +360,10 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
host->dma = dma_priv;
host->write16_hook = sh_mobile_sdhi_write16_hook;
host->clk_enable = sh_mobile_sdhi_clk_enable;
+ host->clk_update = sh_mobile_sdhi_clk_update;
host->clk_disable = sh_mobile_sdhi_clk_disable;
host->multi_io_quirk = sh_mobile_sdhi_multi_io_quirk;
+ host->start_signal_voltage_switch = sh_mobile_sdhi_start_signal_voltage_switch;
/* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
if (!host->bus_shift && resource_size(res) > 0x100) /* old way to determine the shift */
@@ -308,63 +403,24 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
if (ret < 0)
goto efree;
- /*
- * Allow one or more specific (named) ISRs or
- * one or more multiplexed (un-named) ISRs.
- */
-
- irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT);
- if (irq >= 0) {
- multiplexed_isr = false;
- ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_card_detect_irq, 0,
- dev_name(&pdev->dev), host);
- if (ret)
- goto eirq;
- }
-
- irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO);
- if (irq >= 0) {
- multiplexed_isr = false;
- ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdio_irq, 0,
+ while (1) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ break;
+ i++;
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
goto eirq;
}
- irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDCARD);
- if (irq >= 0) {
- multiplexed_isr = false;
- ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdcard_irq, 0,
- dev_name(&pdev->dev), host);
- if (ret)
- goto eirq;
- } else if (!multiplexed_isr) {
- dev_err(&pdev->dev,
- "Principal SD-card IRQ is missing among named interrupts\n");
+ /* There must be at least one IRQ source */
+ if (!i) {
ret = irq;
goto eirq;
}
- if (multiplexed_isr) {
- while (1) {
- irq = platform_get_irq(pdev, i);
- if (irq < 0)
- break;
- i++;
- ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
- dev_name(&pdev->dev), host);
- if (ret)
- goto eirq;
- }
-
- /* There must be at least one IRQ source */
- if (!i) {
- ret = irq;
- goto eirq;
- }
- }
-
- dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n",
+ dev_info(&pdev->dev, "%s base at 0x%08lx max clock rate %u MHz\n",
mmc_hostname(host->mmc), (unsigned long)
(platform_get_resource(pdev, IORESOURCE_MEM, 0)->start),
host->mmc->f_max / 1000000);
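sh_mobile_sdhi_clk_enable() now derives the host's frequency window from the clock itself rather than reporting a single rate back to the core. A small numeric illustration (rates invented): if the lowest rate the clock driver can deliver is clk_round_rate(clk, 1) = 32 MHz, then with the maximum divider of 512:

        mmc->f_min = max(clk_round_rate(priv->clk, 1) / 512, 1L);  /* 32 MHz / 512 = 62.5 kHz */
        if (!mmc->f_max)                                           /* no "max-frequency" in DT */
                mmc->f_max = clk_get_rate(priv->clk);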
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 4a597f5a53e2..1aac2ad8edf2 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -1,6 +1,8 @@
/*
* linux/drivers/mmc/host/tmio_mmc.h
*
+ * Copyright (C) 2016 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2015-16 Renesas Electronics Corporation
* Copyright (C) 2007 Ian Molton
* Copyright (C) 2004 Ian Molton
*
@@ -18,12 +20,67 @@
#include <linux/dmaengine.h>
#include <linux/highmem.h>
-#include <linux/mmc/tmio.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
+#define CTL_SD_CMD 0x00
+#define CTL_ARG_REG 0x04
+#define CTL_STOP_INTERNAL_ACTION 0x08
+#define CTL_XFER_BLK_COUNT 0xa
+#define CTL_RESPONSE 0x0c
+/* driver merges STATUS and following STATUS2 */
+#define CTL_STATUS 0x1c
+/* driver merges IRQ_MASK and following IRQ_MASK2 */
+#define CTL_IRQ_MASK 0x20
+#define CTL_SD_CARD_CLK_CTL 0x24
+#define CTL_SD_XFER_LEN 0x26
+#define CTL_SD_MEM_CARD_OPT 0x28
+#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
+#define CTL_SD_DATA_PORT 0x30
+#define CTL_TRANSACTION_CTL 0x34
+#define CTL_SDIO_STATUS 0x36
+#define CTL_SDIO_IRQ_MASK 0x38
+#define CTL_DMA_ENABLE 0xd8
+#define CTL_RESET_SD 0xe0
+#define CTL_VERSION 0xe2
+#define CTL_SDIO_REGS 0x100
+#define CTL_CLK_AND_WAIT_CTL 0x138
+#define CTL_RESET_SDIO 0x1e0
+
+/* Definitions for values the CTRL_STATUS register can take. */
+#define TMIO_STAT_CMDRESPEND BIT(0)
+#define TMIO_STAT_DATAEND BIT(2)
+#define TMIO_STAT_CARD_REMOVE BIT(3)
+#define TMIO_STAT_CARD_INSERT BIT(4)
+#define TMIO_STAT_SIGSTATE BIT(5)
+#define TMIO_STAT_WRPROTECT BIT(7)
+#define TMIO_STAT_CARD_REMOVE_A BIT(8)
+#define TMIO_STAT_CARD_INSERT_A BIT(9)
+#define TMIO_STAT_SIGSTATE_A BIT(10)
+
+/* These technically belong to CTRL_STATUS2, but the driver merges them */
+#define TMIO_STAT_CMD_IDX_ERR BIT(16)
+#define TMIO_STAT_CRCFAIL BIT(17)
+#define TMIO_STAT_STOPBIT_ERR BIT(18)
+#define TMIO_STAT_DATATIMEOUT BIT(19)
+#define TMIO_STAT_RXOVERFLOW BIT(20)
+#define TMIO_STAT_TXUNDERRUN BIT(21)
+#define TMIO_STAT_CMDTIMEOUT BIT(22)
+#define TMIO_STAT_DAT0 BIT(23) /* only known on R-Car so far */
+#define TMIO_STAT_RXRDY BIT(24)
+#define TMIO_STAT_TXRQ BIT(25)
+#define TMIO_STAT_ILL_FUNC BIT(29) /* only when !TMIO_MMC_HAS_IDLE_WAIT */
+#define TMIO_STAT_SCLKDIVEN BIT(29) /* only when TMIO_MMC_HAS_IDLE_WAIT */
+#define TMIO_STAT_CMD_BUSY BIT(30)
+#define TMIO_STAT_ILL_ACCESS BIT(31)
+
+#define CLK_CTL_DIV_MASK 0xff
+#define CLK_CTL_SCLKEN BIT(8)
+
+#define TMIO_BBS 512 /* Boot block size */
+
/* Definitions for values the CTRL_SDIO_STATUS register can take. */
#define TMIO_SDIO_STAT_IOIRQ 0x0001
#define TMIO_SDIO_STAT_EXPUB52 0x4000
@@ -95,10 +152,14 @@ struct tmio_mmc_host {
bool sdio_irq_enabled;
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
- int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
- void (*clk_disable)(struct platform_device *pdev);
+ int (*clk_enable)(struct tmio_mmc_host *host);
+ unsigned int (*clk_update)(struct tmio_mmc_host *host,
+ unsigned int new_clock);
+ void (*clk_disable)(struct tmio_mmc_host *host);
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
+ int (*start_signal_voltage_switch)(struct mmc_host *mmc,
+ struct mmc_ios *ios);
};
struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
@@ -111,9 +172,6 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
irqreturn_t tmio_mmc_irq(int irq, void *devid);
-irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid);
-irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid);
-irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid);
static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
unsigned long *flags)
@@ -177,7 +235,7 @@ static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
readsw(host->ctl + (addr << host->bus_shift), buf, count);
}
-static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host, int addr)
{
return readw(host->ctl + (addr << host->bus_shift)) |
readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
@@ -199,11 +257,10 @@ static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
writesw(host->ctl + (addr << host->bus_shift), buf, count);
}
-static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, int addr, u32 val)
{
writew(val, host->ctl + (addr << host->bus_shift));
writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
-
#endif
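
The renamed accessors make explicit that CTL_STATUS/CTL_IRQ_MASK and the 16-bit word that follows each of them are two adjacent registers that the driver presents as one 32-bit value. The composition is simply low word OR high word shifted left by 16. A standalone sketch of the same idea, with hypothetical names:

#include <stdint.h>

/* Combine two adjacent 16-bit registers into one 32-bit view (sketch). */
static uint32_t read16_and_16_as_32(uint16_t lo, uint16_t hi)
{
	return (uint32_t)lo | ((uint32_t)hi << 16);
}

/* Split a 32-bit value back into its low and high 16-bit halves. */
static void write32_as_16_and_16(uint32_t val, uint16_t *lo, uint16_t *hi)
{
	*lo = val & 0xffff;
	*hi = val >> 16;
}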
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 7fb0c034dcb6..fa8a936a3d9b 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -15,7 +15,6 @@
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
-#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 0521b4662748..f44e2ab7aea2 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -39,7 +39,6 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
-#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
@@ -56,18 +55,18 @@
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
- sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
+ sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
- sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
+ sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
- sd_ctrl_write32(host, CTL_STATUS, ~i);
+ sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
}
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
@@ -154,31 +153,16 @@ static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
}
}
-static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
- unsigned int new_clock)
+static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
- u32 clk = 0, clock;
-
- if (new_clock) {
- for (clock = host->mmc->f_min, clk = 0x80000080;
- new_clock >= (clock << 1);
- clk >>= 1)
- clock <<= 1;
-
- /* 1/1 clock is option */
- if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
- ((clk >> 22) & 0x1))
- clk |= 0xff;
- }
-
- if (host->set_clk_div)
- host->set_clk_div(host->pdev, (clk >> 22) & 1);
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+ msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 1 : 10);
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
- if (!(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG))
+ if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
+ sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
msleep(10);
+ }
}
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
@@ -190,19 +174,41 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 5 : 10);
+ msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 5 : 10);
}
-static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
+static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
+ unsigned int new_clock)
{
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 1 : 10);
+ u32 clk = 0, clock;
- if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
- sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
- msleep(10);
+ if (new_clock == 0) {
+ tmio_mmc_clk_stop(host);
+ return;
}
+
+ if (host->clk_update)
+ clock = host->clk_update(host, new_clock) / 512;
+ else
+ clock = host->mmc->f_min;
+
+ for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
+ clock <<= 1;
+
+	/* the undivided (1/1) clock is an option */

+ if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1))
+ clk |= 0xff;
+
+ if (host->set_clk_div)
+ host->set_clk_div(host->pdev, (clk >> 22) & 1);
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
+ if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
+ msleep(10);
+
+ tmio_mmc_clk_start(host);
}
static void tmio_mmc_reset(struct tmio_mmc_host *host)
@@ -264,9 +270,6 @@ static void tmio_mmc_reset_work(struct work_struct *work)
tmio_mmc_abort_dma(host);
mmc_request_done(host->mmc, mrq);
-
- pm_runtime_mark_last_busy(mmc_dev(host->mmc));
- pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
/* called with host->lock held, interrupts disabled */
@@ -296,9 +299,6 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
tmio_mmc_abort_dma(host);
mmc_request_done(host->mmc, mrq);
-
- pm_runtime_mark_last_busy(mmc_dev(host->mmc));
- pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
static void tmio_mmc_done_work(struct work_struct *work)
@@ -375,7 +375,7 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command
tmio_mmc_enable_mmc_irqs(host, irq_mask);
/* Fire off the command */
- sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
+ sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
sd_ctrl_write16(host, CTL_SD_CMD, c);
return 0;
@@ -530,7 +530,7 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
goto out;
if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
- u32 status = sd_ctrl_read32(host, CTL_STATUS);
+ u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
bool done = false;
/*
@@ -542,7 +542,7 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
* waiting for one more interrupt fixes the problem.
*/
if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
- if (status & TMIO_STAT_ILL_FUNC)
+ if (status & TMIO_STAT_SCLKDIVEN)
done = true;
} else {
if (!(status & TMIO_STAT_CMD_BUSY))
@@ -585,7 +585,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
*/
for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
- cmd->resp[i] = sd_ctrl_read32(host, addr);
+ cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);
if (cmd->flags & MMC_RSP_136) {
cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
@@ -625,19 +625,6 @@ out:
spin_unlock(&host->lock);
}
-static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
- int *ireg, int *status)
-{
- *status = sd_ctrl_read32(host, CTL_STATUS);
- *ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;
-
- pr_debug_status(*status);
- pr_debug_status(*ireg);
-
- /* Clear the status except the interrupt status */
- sd_ctrl_write32(host, CTL_STATUS, TMIO_MASK_IRQ);
-}
-
static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
int ireg, int status)
{
@@ -657,18 +644,6 @@ static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
return false;
}
-irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid)
-{
- unsigned int ireg, status;
- struct tmio_mmc_host *host = devid;
-
- tmio_mmc_card_irq_status(host, &ireg, &status);
- __tmio_mmc_card_detect_irq(host, ireg, status);
-
- return IRQ_HANDLED;
-}
-EXPORT_SYMBOL(tmio_mmc_card_detect_irq);
-
static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
int ireg, int status)
{
@@ -698,19 +673,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
return false;
}
-irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid)
-{
- unsigned int ireg, status;
- struct tmio_mmc_host *host = devid;
-
- tmio_mmc_card_irq_status(host, &ireg, &status);
- __tmio_mmc_sdcard_irq(host, ireg, status);
-
- return IRQ_HANDLED;
-}
-EXPORT_SYMBOL(tmio_mmc_sdcard_irq);
-
-irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
+static void tmio_mmc_sdio_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
struct mmc_host *mmc = host->mmc;
@@ -719,7 +682,7 @@ irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
unsigned int sdio_status;
if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
- return IRQ_HANDLED;
+ return;
status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;
@@ -732,19 +695,22 @@ irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
mmc_signal_sdio_irq(mmc);
-
- return IRQ_HANDLED;
}
-EXPORT_SYMBOL(tmio_mmc_sdio_irq);
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
unsigned int ireg, status;
- pr_debug("MMC IRQ begin\n");
+ status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
+ ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;
+
+ pr_debug_status(status);
+ pr_debug_status(ireg);
+
+ /* Clear the status except the interrupt status */
+ sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);
- tmio_mmc_card_irq_status(host, &ireg, &status);
if (__tmio_mmc_card_detect_irq(host, ireg, status))
return IRQ_HANDLED;
if (__tmio_mmc_sdcard_irq(host, ireg, status))
@@ -812,8 +778,6 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, flags);
- pm_runtime_get_sync(mmc_dev(mmc));
-
if (mrq->data) {
ret = tmio_mmc_start_data(host, mrq->data);
if (ret)
@@ -832,24 +796,14 @@ fail:
host->mrq = NULL;
mrq->cmd->error = ret;
mmc_request_done(mmc, mrq);
-
- pm_runtime_mark_last_busy(mmc_dev(mmc));
- pm_runtime_put_autosuspend(mmc_dev(mmc));
}
-static int tmio_mmc_clk_update(struct tmio_mmc_host *host)
+static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
{
- struct mmc_host *mmc = host->mmc;
- int ret;
-
if (!host->clk_enable)
return -ENOTSUPP;
- ret = host->clk_enable(host->pdev, &mmc->f_max);
- if (!ret)
- mmc->f_min = mmc->f_max / 512;
-
- return ret;
+ return host->clk_enable(host);
}
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
@@ -925,8 +879,6 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct device *dev = &host->pdev->dev;
unsigned long flags;
- pm_runtime_get_sync(mmc_dev(mmc));
-
mutex_lock(&host->ios_lock);
spin_lock_irqsave(&host->lock, flags);
@@ -959,14 +911,12 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
tmio_mmc_clk_stop(host);
break;
case MMC_POWER_UP:
- tmio_mmc_set_clock(host, ios->clock);
tmio_mmc_power_on(host, ios->vdd);
- tmio_mmc_clk_start(host);
+ tmio_mmc_set_clock(host, ios->clock);
tmio_mmc_set_bus_width(host, ios->bus_width);
break;
case MMC_POWER_ON:
tmio_mmc_set_clock(host, ios->clock);
- tmio_mmc_clk_start(host);
tmio_mmc_set_bus_width(host, ios->bus_width);
break;
}
@@ -983,9 +933,6 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->clk_cache = ios->clock;
mutex_unlock(&host->ios_lock);
-
- pm_runtime_mark_last_busy(mmc_dev(mmc));
- pm_runtime_put_autosuspend(mmc_dev(mmc));
}
static int tmio_mmc_get_ro(struct mmc_host *mmc)
@@ -996,11 +943,8 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
if (ret >= 0)
return ret;
- pm_runtime_get_sync(mmc_dev(mmc));
ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
- (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
- pm_runtime_mark_last_busy(mmc_dev(mmc));
- pm_runtime_put_autosuspend(mmc_dev(mmc));
+ (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
return ret;
}
@@ -1016,12 +960,20 @@ static int tmio_multi_io_quirk(struct mmc_card *card,
return blk_size;
}
-static const struct mmc_host_ops tmio_mmc_ops = {
+static int tmio_mmc_card_busy(struct mmc_host *mmc)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_DAT0);
+}
+
+static struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
.get_cd = mmc_gpio_get_cd,
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
+ .card_busy = tmio_mmc_card_busy,
.multi_io_quirk = tmio_multi_io_quirk,
};
@@ -1120,7 +1072,9 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
goto host_free;
}
+ tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
mmc->ops = &tmio_mmc_ops;
+
mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = 32;
@@ -1135,7 +1089,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
mmc->caps & MMC_CAP_NONREMOVABLE ||
mmc->slot.cd_irq >= 0);
- if (tmio_mmc_clk_update(_host) < 0) {
+ if (tmio_mmc_clk_enable(_host) < 0) {
mmc->f_max = pdata->hclk;
mmc->f_min = mmc->f_max / 512;
}
@@ -1159,7 +1113,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
tmio_mmc_clk_stop(_host);
tmio_mmc_reset(_host);
- _host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK);
+ _host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
/* Unmask the IRQs we want to know about */
@@ -1251,7 +1205,7 @@ int tmio_mmc_host_runtime_suspend(struct device *dev)
tmio_mmc_clk_stop(host);
if (host->clk_disable)
- host->clk_disable(host->pdev);
+ host->clk_disable(host);
return 0;
}
@@ -1263,12 +1217,10 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
struct tmio_mmc_host *host = mmc_priv(mmc);
tmio_mmc_reset(host);
- tmio_mmc_clk_update(host);
+ tmio_mmc_clk_enable(host);
- if (host->clk_cache) {
+ if (host->clk_cache)
tmio_mmc_set_clock(host, host->clk_cache);
- tmio_mmc_clk_start(host);
- }
tmio_mmc_enable_dma(host, true);
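
The reworked tmio_mmc_set_clock() above derives the divider by starting from the slowest selectable clock (base / 512) and the register pattern 0x80000080, then doubling the clock and shifting the pattern right until one more doubling would exceed the request; the low byte that remains is the CLK_CTL_DIV_MASK value. A small host-side sketch of that search, with a hypothetical base clock:

#include <stdio.h>
#include <stdint.h>

/* Sketch: reproduce the divider search used by tmio_mmc_set_clock(). */
static uint32_t tmio_pick_divider(unsigned int base_hz, unsigned int new_clock)
{
	unsigned int clock = base_hz / 512;	/* slowest selectable clock */
	uint32_t clk = 0x80000080;

	for (; new_clock >= (clock << 1); clk >>= 1)
		clock <<= 1;

	return clk & 0xff;			/* CLK_CTL_DIV_MASK portion */
}

int main(void)
{
	/* Hypothetical 100 MHz base clock, 25 MHz requested. */
	printf("divider bits: 0x%02x\n",
	       tmio_pick_divider(100000000, 25000000));
	return 0;
}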
diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c
index e2cdd5fb1423..553ef41bb806 100644
--- a/drivers/mmc/host/toshsd.c
+++ b/drivers/mmc/host/toshsd.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 807c06e203c3..1bd5f1a18d4e 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -22,6 +22,7 @@
#include <linux/mmc/sdio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
@@ -198,6 +199,11 @@ struct usdhi6_host {
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
bool dma_active;
+
+ /* Pin control */
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_uhs;
};
/* I/O primitives */
@@ -1147,12 +1153,45 @@ static void usdhi6_enable_sdio_irq(struct mmc_host *mmc, int enable)
}
}
+static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
+{
+ if (IS_ERR(host->pins_uhs))
+ return 0;
+
+ switch (voltage) {
+ case MMC_SIGNAL_VOLTAGE_180:
+ case MMC_SIGNAL_VOLTAGE_120:
+ return pinctrl_select_state(host->pinctrl,
+ host->pins_uhs);
+
+ default:
+ return pinctrl_select_state(host->pinctrl,
+ host->pins_default);
+ }
+}
+
+static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ int ret;
+
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
+ if (ret < 0)
+ return ret;
+
+ ret = usdhi6_set_pinstates(mmc_priv(mmc), ios->signal_voltage);
+ if (ret)
+ dev_warn_once(mmc_dev(mmc),
+ "Failed to set pinstate err=%d\n", ret);
+ return ret;
+}
+
static struct mmc_host_ops usdhi6_ops = {
.request = usdhi6_request,
.set_ios = usdhi6_set_ios,
.get_cd = usdhi6_get_cd,
.get_ro = usdhi6_get_ro,
.enable_sdio_irq = usdhi6_enable_sdio_irq,
+ .start_signal_voltage_switch = usdhi6_sig_volt_switch,
};
/* State machine handlers */
@@ -1730,6 +1769,25 @@ static int usdhi6_probe(struct platform_device *pdev)
host->wait = USDHI6_WAIT_FOR_REQUEST;
host->timeout = msecs_to_jiffies(4000);
+ host->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(host->pinctrl)) {
+ ret = PTR_ERR(host->pinctrl);
+ goto e_free_mmc;
+ }
+
+ host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
+ if (!IS_ERR(host->pins_uhs)) {
+ host->pins_default = pinctrl_lookup_state(host->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+
+ if (IS_ERR(host->pins_default)) {
+ dev_err(dev,
+ "UHS pinctrl requires a default pin state.\n");
+ ret = PTR_ERR(host->pins_default);
+ goto e_free_mmc;
+ }
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->base = devm_ioremap_resource(dev, res);
if (IS_ERR(host->base)) {
@@ -1785,7 +1843,7 @@ static int usdhi6_probe(struct platform_device *pdev)
mmc->ops = &usdhi6_ops;
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_SDIO_IRQ;
+ MMC_CAP_SDIO_IRQ;
/* Set .max_segs to some random number. Feel free to adjust. */
mmc->max_segs = 32;
mmc->max_blk_size = 512;
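
The usdhi6rol0 changes above tie signal-voltage switching to pinctrl states: an optional "state_uhs" state is selected for 1.8 V/1.2 V signalling and the default state otherwise, and a missing default state is treated as a probe error only when the UHS state exists. A condensed sketch of that lookup-and-select pattern, using hypothetical names and trimmed error handling:

/* Sketch: optional UHS pinctrl state with a mandatory default fallback. */
struct example_pins {
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_uhs;
};

static int example_pins_init(struct device *dev, struct example_pins *p)
{
	p->pinctrl = devm_pinctrl_get(dev);
	if (IS_ERR(p->pinctrl))
		return PTR_ERR(p->pinctrl);

	p->pins_uhs = pinctrl_lookup_state(p->pinctrl, "state_uhs");
	if (IS_ERR(p->pins_uhs))
		return 0;	/* UHS pins are optional: stay on the default state */

	p->pins_default = pinctrl_lookup_state(p->pinctrl,
					       PINCTRL_STATE_DEFAULT);
	return PTR_ERR_OR_ZERO(p->pins_default);
}

static int example_pins_set_uhs(struct example_pins *p, bool uhs)
{
	if (IS_ERR(p->pins_uhs))
		return 0;

	return pinctrl_select_state(p->pinctrl,
				    uhs ? p->pins_uhs : p->pins_default);
}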
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f4701182b558..74ae24364a8d 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -409,7 +409,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
goto error3;
if (tr->flush)
- blk_queue_flush(new->rq, REQ_FLUSH);
+ blk_queue_write_cache(new->rq, true, false);
new->rq->queuedata = new;
blk_queue_logical_block_size(new->rq, tr->blksize);
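
blk_queue_flush(q, REQ_FLUSH) is replaced by blk_queue_write_cache(), which takes two booleans: whether a volatile write cache is present and whether FUA writes are supported. The MTD translation layer advertises a flushable cache without FUA, while the NVMe hunk further down derives both flags from the controller's VWC bit. A short sketch of the two call styles seen in this patch:

/* Sketch: the two blk_queue_write_cache() conversions in this series. */
static void example_set_cache_flags(struct request_queue *rq_mtd,
				    struct request_queue *rq_nvme, bool vwc)
{
	/* MTD translation layer: flushable cache, no FUA. */
	blk_queue_write_cache(rq_mtd, true, false);

	/* NVMe: both flags follow the controller's volatile-write-cache bit. */
	blk_queue_write_cache(rq_nvme, vwc, vwc);
}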
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 309625130b21..bee180bd11e7 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -40,6 +40,7 @@
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/kconfig.h>
+#include <linux/leds.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -862,6 +863,7 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
mtd_erase_callback(instr);
return 0;
}
+ ledtrig_mtd_activity();
return mtd->_erase(mtd, instr);
}
EXPORT_SYMBOL_GPL(mtd_erase);
@@ -925,6 +927,7 @@ int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
if (!len)
return 0;
+ ledtrig_mtd_activity();
/*
* In the absence of an error, drivers return a non-negative integer
* representing the maximum number of bitflips that were corrected on
@@ -949,6 +952,7 @@ int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
return -EROFS;
if (!len)
return 0;
+ ledtrig_mtd_activity();
return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);
@@ -982,6 +986,8 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
ops->retlen = ops->oobretlen = 0;
if (!mtd->_read_oob)
return -EOPNOTSUPP;
+
+ ledtrig_mtd_activity();
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
* similar to mtd->_read(), returning a non-negative integer
@@ -997,6 +1003,19 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
+int mtd_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ ops->retlen = ops->oobretlen = 0;
+ if (!mtd->_write_oob)
+ return -EOPNOTSUPP;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ ledtrig_mtd_activity();
+ return mtd->_write_oob(mtd, to, ops);
+}
+EXPORT_SYMBOL_GPL(mtd_write_oob);
+
/*
* Method to access the protection register area, present in some flash
* devices. The user data is one time programmable but the factory data is read
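
mtd_write_oob() becomes an exported wrapper that validates the operation (driver support, writable device) and signals LED activity before delegating to _write_oob, mirroring mtd_read_oob(). Callers describe the transfer with a struct mtd_oob_ops; a hedged usage sketch with hypothetical buffers:

/* Sketch: writing page data plus OOB through the new wrapper. */
static int example_write_page_with_oob(struct mtd_info *mtd, loff_t to,
				       u8 *data, size_t len,
				       u8 *oob, size_t ooblen)
{
	struct mtd_oob_ops ops = {
		.mode   = MTD_OPS_AUTO_OOB,
		.datbuf = data,
		.len    = len,
		.oobbuf = oob,
		.ooblen = ooblen,
	};

	return mtd_write_oob(mtd, to, &ops);
}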
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 557b8462f55e..ba4f603e0537 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -43,7 +43,6 @@
#include <linux/mtd/nand_bch.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
-#include <linux/leds.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/of_mtd.h>
@@ -97,12 +96,6 @@ static int nand_get_device(struct mtd_info *mtd, int new_state);
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops);
-/*
- * For devices which display every fart in the system on a separate LED. Is
- * compiled away when LED support is disabled.
- */
-DEFINE_LED_TRIGGER(nand_led_trigger);
-
static int check_offs_len(struct mtd_info *mtd,
loff_t ofs, uint64_t len)
{
@@ -540,19 +533,16 @@ void nand_wait_ready(struct mtd_info *mtd)
if (in_interrupt() || oops_in_progress)
return panic_nand_wait_ready(mtd, timeo);
- led_trigger_event(nand_led_trigger, LED_FULL);
/* Wait until command is processed or timeout occurs */
timeo = jiffies + msecs_to_jiffies(timeo);
do {
if (chip->dev_ready(mtd))
- goto out;
+ return;
cond_resched();
} while (time_before(jiffies, timeo));
if (!chip->dev_ready(mtd))
pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
-out:
- led_trigger_event(nand_led_trigger, LED_OFF);
}
EXPORT_SYMBOL_GPL(nand_wait_ready);
@@ -885,8 +875,6 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
int status;
unsigned long timeo = 400;
- led_trigger_event(nand_led_trigger, LED_FULL);
-
/*
* Apply this short delay always to ensure that we do wait tWB in any
* case on any machine.
@@ -910,7 +898,6 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
cond_resched();
} while (time_before(jiffies, timeo));
}
- led_trigger_event(nand_led_trigger, LED_OFF);
status = (int)chip->read_byte(mtd);
/* This can happen if in case of timeout or buggy dev_ready */
@@ -4466,20 +4453,6 @@ void nand_release(struct mtd_info *mtd)
}
EXPORT_SYMBOL_GPL(nand_release);
-static int __init nand_base_init(void)
-{
- led_trigger_register_simple("nand-disk", &nand_led_trigger);
- return 0;
-}
-
-static void __exit nand_base_exit(void)
-{
- led_trigger_unregister_simple(nand_led_trigger);
-}
-
-module_init(nand_base_init);
-module_exit(nand_base_exit);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index eb6663866c9f..78dbc44540f6 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -635,10 +635,10 @@ static int receive(struct net_device *dev, int cnt)
#ifdef __i386__
#include <asm/msr.h>
-#define GETTICK(x) \
-({ \
- if (cpu_has_tsc) \
- x = (unsigned int)rdtsc(); \
+#define GETTICK(x) \
+({ \
+ if (boot_cpu_has(X86_FEATURE_TSC)) \
+ x = (unsigned int)rdtsc(); \
})
#else /* __i386__ */
#define GETTICK(x)
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index c894841c6456..d296fc3ae06e 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -18,7 +18,7 @@ config BLK_DEV_NVME_SCSI
depends on NVME_CORE
---help---
This adds support for the SG_IO ioctl on the NVMe character
- and block devices nodes, as well a a translation for a small
+ and block devices nodes, as well as a translation for a small
number of selected SCSI commands to NVMe commands to the NVMe
driver. If you don't know what this means you probably want
to say N here, unless you run a distro that abuses the SCSI
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 643f457131c2..2de248bd462b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -58,6 +58,55 @@ static DEFINE_SPINLOCK(dev_list_lock);
static struct class *nvme_class;
+bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
+ enum nvme_ctrl_state new_state)
+{
+ enum nvme_ctrl_state old_state = ctrl->state;
+ bool changed = false;
+
+ spin_lock_irq(&ctrl->lock);
+ switch (new_state) {
+ case NVME_CTRL_LIVE:
+ switch (old_state) {
+ case NVME_CTRL_RESETTING:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
+ case NVME_CTRL_RESETTING:
+ switch (old_state) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_LIVE:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
+ case NVME_CTRL_DELETING:
+ switch (old_state) {
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irq(&ctrl->lock);
+
+ if (changed)
+ ctrl->state = new_state;
+
+ return changed;
+}
+EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
+
static void nvme_free_ns(struct kref *kref)
{
struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
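
nvme_change_ctrl_state() above encodes the permitted controller transitions with nested switches under ctrl->lock: LIVE is reachable only from RESETTING, RESETTING from NEW or LIVE, and DELETING from LIVE or RESETTING. The same policy can be pictured as a small transition table; a standalone sketch in plain C (not the kernel implementation, and without the locking):

#include <stdbool.h>

enum ctrl_state { ST_NEW, ST_LIVE, ST_RESETTING, ST_DELETING, ST_MAX };

/* allowed[new][old]: mirrors the nested switches in nvme_change_ctrl_state(). */
static const bool allowed[ST_MAX][ST_MAX] = {
	[ST_LIVE]      = { [ST_RESETTING] = true },
	[ST_RESETTING] = { [ST_NEW] = true, [ST_LIVE] = true },
	[ST_DELETING]  = { [ST_LIVE] = true, [ST_RESETTING] = true },
};

static bool change_state(enum ctrl_state *state, enum ctrl_state new_state)
{
	if (!allowed[new_state][*state])
		return false;
	*state = new_state;
	return true;
}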
@@ -138,6 +187,111 @@ struct request *nvme_alloc_request(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
+static inline void nvme_setup_flush(struct nvme_ns *ns,
+ struct nvme_command *cmnd)
+{
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->common.opcode = nvme_cmd_flush;
+ cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+}
+
+static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *cmnd)
+{
+ struct nvme_dsm_range *range;
+ struct page *page;
+ int offset;
+ unsigned int nr_bytes = blk_rq_bytes(req);
+
+ range = kmalloc(sizeof(*range), GFP_ATOMIC);
+ if (!range)
+ return BLK_MQ_RQ_QUEUE_BUSY;
+
+ range->cattr = cpu_to_le32(0);
+ range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
+ range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->dsm.opcode = nvme_cmd_dsm;
+ cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
+ cmnd->dsm.nr = 0;
+ cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+
+ req->completion_data = range;
+ page = virt_to_page(range);
+ offset = offset_in_page(range);
+ blk_add_request_payload(req, page, offset, sizeof(*range));
+
+ /*
+	 * We set __data_len back to the size of the area to be discarded
+	 * on disk, which lets us report completion on the full number of
+	 * blocks described by the request.
+ */
+ req->__data_len = nr_bytes;
+
+ return 0;
+}
+
+static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *cmnd)
+{
+ u16 control = 0;
+ u32 dsmgmt = 0;
+
+ if (req->cmd_flags & REQ_FUA)
+ control |= NVME_RW_FUA;
+ if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+ control |= NVME_RW_LR;
+
+ if (req->cmd_flags & REQ_RAHEAD)
+ dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+ cmnd->rw.command_id = req->tag;
+ cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+ cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+ cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+
+ if (ns->ms) {
+ switch (ns->pi_type) {
+ case NVME_NS_DPS_PI_TYPE3:
+ control |= NVME_RW_PRINFO_PRCHK_GUARD;
+ break;
+ case NVME_NS_DPS_PI_TYPE1:
+ case NVME_NS_DPS_PI_TYPE2:
+ control |= NVME_RW_PRINFO_PRCHK_GUARD |
+ NVME_RW_PRINFO_PRCHK_REF;
+ cmnd->rw.reftag = cpu_to_le32(
+ nvme_block_nr(ns, blk_rq_pos(req)));
+ break;
+ }
+ if (!blk_integrity_rq(req))
+ control |= NVME_RW_PRINFO_PRACT;
+ }
+
+ cmnd->rw.control = cpu_to_le16(control);
+ cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+}
+
+int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *cmd)
+{
+ int ret = 0;
+
+ if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+ memcpy(cmd, req->cmd, sizeof(*cmd));
+ else if (req->cmd_flags & REQ_FLUSH)
+ nvme_setup_flush(ns, cmd);
+ else if (req->cmd_flags & REQ_DISCARD)
+ ret = nvme_setup_discard(ns, req, cmd);
+ else
+ nvme_setup_rw(ns, req, cmd);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_setup_cmd);
+
/*
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
@@ -894,6 +1048,8 @@ EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
struct request_queue *q)
{
+ bool vwc = false;
+
if (ctrl->max_hw_sectors) {
u32 max_segments =
(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
@@ -903,9 +1059,10 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
}
if (ctrl->stripe_size)
blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
- if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
- blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
blk_queue_virt_boundary(q, ctrl->page_size - 1);
+ if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+ vwc = true;
+ blk_queue_write_cache(q, vwc, vwc);
}
/*
@@ -1272,7 +1429,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (nvme_revalidate_disk(ns->disk))
goto out_free_disk;
- list_add_tail(&ns->list, &ctrl->namespaces);
+ list_add_tail_rcu(&ns->list, &ctrl->namespaces);
kref_get(&ctrl->kref);
if (ns->type == NVME_NS_LIGHTNVM)
return;
@@ -1295,6 +1452,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
static void nvme_ns_remove(struct nvme_ns *ns)
{
+ lockdep_assert_held(&ns->ctrl->namespaces_mutex);
+
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
@@ -1307,9 +1466,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
}
- mutex_lock(&ns->ctrl->namespaces_mutex);
list_del_init(&ns->list);
- mutex_unlock(&ns->ctrl->namespaces_mutex);
+ synchronize_rcu();
nvme_put_ns(ns);
}
@@ -1361,7 +1519,7 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
return ret;
}
-static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn)
+static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
struct nvme_ns *ns, *next;
unsigned i;
@@ -1377,11 +1535,16 @@ static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn)
}
}
-void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
+static void nvme_scan_work(struct work_struct *work)
{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, scan_work);
struct nvme_id_ctrl *id;
unsigned nn;
+ if (ctrl->state != NVME_CTRL_LIVE)
+ return;
+
if (nvme_identify_ctrl(ctrl, &id))
return;
@@ -1392,23 +1555,86 @@ void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
if (!nvme_scan_ns_list(ctrl, nn))
goto done;
}
- __nvme_scan_namespaces(ctrl, le32_to_cpup(&id->nn));
+ nvme_scan_ns_sequential(ctrl, nn);
done:
list_sort(NULL, &ctrl->namespaces, ns_cmp);
mutex_unlock(&ctrl->namespaces_mutex);
kfree(id);
+
+ if (ctrl->ops->post_scan)
+ ctrl->ops->post_scan(ctrl);
+}
+
+void nvme_queue_scan(struct nvme_ctrl *ctrl)
+{
+ /*
+ * Do not queue new scan work when a controller is reset during
+ * removal.
+ */
+ if (ctrl->state == NVME_CTRL_LIVE)
+ schedule_work(&ctrl->scan_work);
}
-EXPORT_SYMBOL_GPL(nvme_scan_namespaces);
+EXPORT_SYMBOL_GPL(nvme_queue_scan);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns, *next;
+ mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
nvme_ns_remove(ns);
+ mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
+static void nvme_async_event_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, async_event_work);
+
+ spin_lock_irq(&ctrl->lock);
+ while (ctrl->event_limit > 0) {
+ int aer_idx = --ctrl->event_limit;
+
+ spin_unlock_irq(&ctrl->lock);
+ ctrl->ops->submit_async_event(ctrl, aer_idx);
+ spin_lock_irq(&ctrl->lock);
+ }
+ spin_unlock_irq(&ctrl->lock);
+}
+
+void nvme_complete_async_event(struct nvme_ctrl *ctrl,
+ struct nvme_completion *cqe)
+{
+ u16 status = le16_to_cpu(cqe->status) >> 1;
+ u32 result = le32_to_cpu(cqe->result);
+
+ if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
+ ++ctrl->event_limit;
+ schedule_work(&ctrl->async_event_work);
+ }
+
+ if (status != NVME_SC_SUCCESS)
+ return;
+
+ switch (result & 0xff07) {
+ case NVME_AER_NOTICE_NS_CHANGED:
+ dev_info(ctrl->device, "rescanning\n");
+ nvme_queue_scan(ctrl);
+ break;
+ default:
+ dev_warn(ctrl->device, "async event result %08x\n", result);
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_complete_async_event);
+
+void nvme_queue_async_events(struct nvme_ctrl *ctrl)
+{
+ ctrl->event_limit = NVME_NR_AERS;
+ schedule_work(&ctrl->async_event_work);
+}
+EXPORT_SYMBOL_GPL(nvme_queue_async_events);
+
static DEFINE_IDA(nvme_instance_ida);
static int nvme_set_instance(struct nvme_ctrl *ctrl)
@@ -1440,6 +1666,10 @@ static void nvme_release_instance(struct nvme_ctrl *ctrl)
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
+ flush_work(&ctrl->async_event_work);
+ flush_work(&ctrl->scan_work);
+ nvme_remove_namespaces(ctrl);
+
device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
spin_lock(&dev_list_lock);
@@ -1475,12 +1705,16 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
{
int ret;
+ ctrl->state = NVME_CTRL_NEW;
+ spin_lock_init(&ctrl->lock);
INIT_LIST_HEAD(&ctrl->namespaces);
mutex_init(&ctrl->namespaces_mutex);
kref_init(&ctrl->kref);
ctrl->dev = dev;
ctrl->ops = ops;
ctrl->quirks = quirks;
+ INIT_WORK(&ctrl->scan_work, nvme_scan_work);
+ INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
ret = nvme_set_instance(ctrl);
if (ret)
@@ -1520,8 +1754,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->namespaces_mutex);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
if (!kref_get_unless_zero(&ns->kref))
continue;
@@ -1538,7 +1772,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
nvme_put_ns(ns);
}
- mutex_unlock(&ctrl->namespaces_mutex);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
@@ -1546,8 +1780,8 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->namespaces_mutex);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
spin_lock_irq(ns->queue->queue_lock);
queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
spin_unlock_irq(ns->queue->queue_lock);
@@ -1555,7 +1789,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
blk_mq_cancel_requeue_work(ns->queue);
blk_mq_stop_hw_queues(ns->queue);
}
- mutex_unlock(&ctrl->namespaces_mutex);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
@@ -1563,13 +1797,13 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->namespaces_mutex);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true);
blk_mq_kick_requeue_list(ns->queue);
}
- mutex_unlock(&ctrl->namespaces_mutex);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
@@ -1607,9 +1841,9 @@ int __init nvme_core_init(void)
void nvme_core_exit(void)
{
- unregister_blkdev(nvme_major, "nvme");
class_destroy(nvme_class);
__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+ unregister_blkdev(nvme_major, "nvme");
}
MODULE_LICENSE("GPL");
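
The namespace list in core.c is now RCU friendly: entries are added with list_add_tail_rcu(), removal happens under namespaces_mutex and is followed by synchronize_rcu(), and the queue start/stop/kill paths iterate with list_for_each_entry_rcu() under rcu_read_lock() instead of taking the mutex. A generic sketch of that reader/updater split (hypothetical item type, canonical list_del_rcu() form; the patch itself uses list_del_init() before synchronize_rcu()):

/* Sketch: lockless readers over a list that is updated under a mutex + RCU. */
struct example_item {
	struct list_head list;
	int id;
};

static LIST_HEAD(example_list);
static DEFINE_MUTEX(example_lock);

static void example_add(struct example_item *it)
{
	mutex_lock(&example_lock);
	list_add_tail_rcu(&it->list, &example_list);
	mutex_unlock(&example_lock);
}

static void example_del(struct example_item *it)
{
	mutex_lock(&example_lock);
	list_del_rcu(&it->list);
	mutex_unlock(&example_lock);
	synchronize_rcu();	/* wait for readers to drop references */
	kfree(it);
}

static void example_for_each(void (*fn)(struct example_item *))
{
	struct example_item *it;

	rcu_read_lock();
	list_for_each_entry_rcu(it, &example_list, list)
		fn(it);
	rcu_read_unlock();
}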
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 9461dd639acd..a0af0558354c 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -367,8 +367,8 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
(struct nvme_command *)&c, entries, len);
if (ret) {
- dev_err(ns->ctrl->dev, "L2P table transfer failed (%d)\n",
- ret);
+ dev_err(ns->ctrl->device,
+ "L2P table transfer failed (%d)\n", ret);
ret = -EIO;
goto out;
}
@@ -387,41 +387,16 @@ out:
return ret;
}
-static void nvme_nvm_bb_tbl_fold(struct nvm_dev *nvmdev,
- int nr_dst_blks, u8 *dst_blks,
- int nr_src_blks, u8 *src_blks)
-{
- int blk, offset, pl, blktype;
-
- for (blk = 0; blk < nr_dst_blks; blk++) {
- offset = blk * nvmdev->plane_mode;
- blktype = src_blks[offset];
-
- /* Bad blocks on any planes take precedence over other types */
- for (pl = 0; pl < nvmdev->plane_mode; pl++) {
- if (src_blks[offset + pl] &
- (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
- blktype = src_blks[offset + pl];
- break;
- }
- }
-
- dst_blks[blk] = blktype;
- }
-}
-
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
- int nr_dst_blks, nvm_bb_update_fn *update_bbtbl,
- void *priv)
+ u8 *blks)
{
struct request_queue *q = nvmdev->q;
struct nvme_ns *ns = q->queuedata;
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_nvm_command c = {};
struct nvme_nvm_bb_tbl *bb_tbl;
- u8 *dst_blks = NULL;
- int nr_src_blks = nr_dst_blks * nvmdev->plane_mode;
- int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_src_blks;
+ int nr_blks = nvmdev->blks_per_lun * nvmdev->plane_mode;
+ int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
int ret = 0;
c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
@@ -432,54 +407,43 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
if (!bb_tbl)
return -ENOMEM;
- dst_blks = kzalloc(nr_dst_blks, GFP_KERNEL);
- if (!dst_blks) {
- ret = -ENOMEM;
- goto out;
- }
-
ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
bb_tbl, tblsz);
if (ret) {
- dev_err(ctrl->dev, "get bad block table failed (%d)\n", ret);
+ dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
ret = -EIO;
goto out;
}
if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
- dev_err(ctrl->dev, "bbt format mismatch\n");
+ dev_err(ctrl->device, "bbt format mismatch\n");
ret = -EINVAL;
goto out;
}
if (le16_to_cpu(bb_tbl->verid) != 1) {
ret = -EINVAL;
- dev_err(ctrl->dev, "bbt version not supported\n");
+ dev_err(ctrl->device, "bbt version not supported\n");
goto out;
}
- if (le32_to_cpu(bb_tbl->tblks) != nr_src_blks) {
+ if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
ret = -EINVAL;
- dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
- le32_to_cpu(bb_tbl->tblks), nr_src_blks);
+		dev_err(ctrl->device,
+			"bbt unexpected blocks returned (%u!=%u)",
+ le32_to_cpu(bb_tbl->tblks), nr_blks);
goto out;
}
- nvme_nvm_bb_tbl_fold(nvmdev, nr_dst_blks, dst_blks,
- nr_src_blks, bb_tbl->blk);
-
- ppa = dev_to_generic_addr(nvmdev, ppa);
- ret = update_bbtbl(ppa, nr_dst_blks, dst_blks, priv);
-
+ memcpy(blks, bb_tbl->blk, nvmdev->blks_per_lun * nvmdev->plane_mode);
out:
- kfree(dst_blks);
kfree(bb_tbl);
return ret;
}
-static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
- int type)
+static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
+ int nr_ppas, int type)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
struct nvme_nvm_command c = {};
@@ -487,14 +451,15 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
c.set_bb.nsid = cpu_to_le32(ns->ns_id);
- c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
- c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
+ c.set_bb.spba = cpu_to_le64(ppas->ppa);
+ c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
c.set_bb.value = type;
ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
NULL, 0);
if (ret)
- dev_err(ns->ctrl->dev, "set bad block table failed (%d)\n", ret);
+ dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
+ ret);
return ret;
}
@@ -504,8 +469,9 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
c->ph_rw.opcode = rqd->opcode;
c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
+ c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
c->ph_rw.control = cpu_to_le16(rqd->flags);
- c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1);
+ c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
@@ -576,7 +542,7 @@ static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
c.erase.opcode = NVM_OP_ERASE;
c.erase.nsid = cpu_to_le32(ns->ns_id);
c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
- c.erase.length = cpu_to_le16(rqd->nr_pages - 1);
+ c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);
return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}
@@ -601,10 +567,10 @@ static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
return dma_pool_alloc(pool, mem_flags, dma_handler);
}
-static void nvme_nvm_dev_dma_free(void *pool, void *ppa_list,
+static void nvme_nvm_dev_dma_free(void *pool, void *addr,
dma_addr_t dma_handler)
{
- dma_pool_free(pool, ppa_list, dma_handler);
+ dma_pool_free(pool, addr, dma_handler);
}
static struct nvm_dev_ops nvme_nvm_dev_ops = {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index f846da4eb338..114b92873894 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -67,7 +67,16 @@ enum nvme_quirks {
NVME_QUIRK_DISCARD_ZEROES = (1 << 2),
};
+enum nvme_ctrl_state {
+ NVME_CTRL_NEW,
+ NVME_CTRL_LIVE,
+ NVME_CTRL_RESETTING,
+ NVME_CTRL_DELETING,
+};
+
struct nvme_ctrl {
+ enum nvme_ctrl_state state;
+ spinlock_t lock;
const struct nvme_ctrl_ops *ops;
struct request_queue *admin_q;
struct device *dev;
@@ -84,7 +93,7 @@ struct nvme_ctrl {
char serial[20];
char model[40];
char firmware_rev[8];
- int cntlid;
+ u16 cntlid;
u32 ctrl_config;
@@ -99,6 +108,8 @@ struct nvme_ctrl {
u32 vs;
bool subsystem;
unsigned long quirks;
+ struct work_struct scan_work;
+ struct work_struct async_event_work;
};
/*
@@ -136,9 +147,10 @@ struct nvme_ctrl_ops {
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
- bool (*io_incapable)(struct nvme_ctrl *ctrl);
int (*reset_ctrl)(struct nvme_ctrl *ctrl);
void (*free_ctrl)(struct nvme_ctrl *ctrl);
+ void (*post_scan)(struct nvme_ctrl *ctrl);
+ void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
};
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
@@ -150,17 +162,6 @@ static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
return val & NVME_CSTS_RDY;
}
-static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
-{
- u32 val = 0;
-
- if (ctrl->ops->io_incapable(ctrl))
- return true;
- if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
- return true;
- return val & NVME_CSTS_CFS;
-}
-
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
if (!ctrl->subsystem)
@@ -173,57 +174,20 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
return (sector >> (ns->lba_shift - 9));
}
-static inline void nvme_setup_flush(struct nvme_ns *ns,
- struct nvme_command *cmnd)
+static inline unsigned nvme_map_len(struct request *rq)
{
- memset(cmnd, 0, sizeof(*cmnd));
- cmnd->common.opcode = nvme_cmd_flush;
- cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+ if (rq->cmd_flags & REQ_DISCARD)
+ return sizeof(struct nvme_dsm_range);
+ else
+ return blk_rq_bytes(rq);
}
-static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
- struct nvme_command *cmnd)
+static inline void nvme_cleanup_cmd(struct request *req)
{
- u16 control = 0;
- u32 dsmgmt = 0;
-
- if (req->cmd_flags & REQ_FUA)
- control |= NVME_RW_FUA;
- if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
- control |= NVME_RW_LR;
-
- if (req->cmd_flags & REQ_RAHEAD)
- dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
-
- memset(cmnd, 0, sizeof(*cmnd));
- cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
- cmnd->rw.command_id = req->tag;
- cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
- cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
- cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-
- if (ns->ms) {
- switch (ns->pi_type) {
- case NVME_NS_DPS_PI_TYPE3:
- control |= NVME_RW_PRINFO_PRCHK_GUARD;
- break;
- case NVME_NS_DPS_PI_TYPE1:
- case NVME_NS_DPS_PI_TYPE2:
- control |= NVME_RW_PRINFO_PRCHK_GUARD |
- NVME_RW_PRINFO_PRCHK_REF;
- cmnd->rw.reftag = cpu_to_le32(
- nvme_block_nr(ns, blk_rq_pos(req)));
- break;
- }
- if (!blk_integrity_rq(req))
- control |= NVME_RW_PRINFO_PRACT;
- }
-
- cmnd->rw.control = cpu_to_le16(control);
- cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+ if (req->cmd_flags & REQ_DISCARD)
+ kfree(req->completion_data);
}
-
static inline int nvme_error_status(u16 status)
{
switch (status & 0x7ff) {
@@ -242,6 +206,8 @@ static inline bool nvme_req_needs_retry(struct request *req, u16 status)
(jiffies - req->start_time) < req->timeout;
}
+bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
+ enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
@@ -251,9 +217,14 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);
-void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
+void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
+#define NVME_NR_AERS 1
+void nvme_complete_async_event(struct nvme_ctrl *ctrl,
+ struct nvme_completion *cqe);
+void nvme_queue_async_events(struct nvme_ctrl *ctrl);
+
void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
@@ -261,6 +232,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl);
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, unsigned int flags);
void nvme_requeue_req(struct request *req);
+int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
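
NVME_NR_AERS above reserves the asynchronous-event slots that the PCI driver subtracts from the admin queue depth: NVME_AQ_BLKMQ_DEPTH = NVME_AQ_DEPTH - NVME_NR_AERS, AEN submissions use command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx, and completions with a command_id at or above that depth bypass the block layer. A tiny sketch of that arithmetic, assuming a hypothetical admin queue depth of 32:

#include <assert.h>
#include <stdbool.h>

#define EX_AQ_DEPTH		32	/* assumed admin queue depth */
#define EX_NR_AERS		1	/* mirrors NVME_NR_AERS above */
#define EX_AQ_BLKMQ_DEPTH	(EX_AQ_DEPTH - EX_NR_AERS)

/* AEN commands borrow IDs above the blk-mq tag space. */
static bool is_aen_completion(unsigned int command_id)
{
	return command_id >= EX_AQ_BLKMQ_DEPTH;
}

int main(void)
{
	unsigned int aer_idx = 0;
	unsigned int cid = EX_AQ_BLKMQ_DEPTH + aer_idx;	/* as submitted */

	assert(is_aen_completion(cid));
	return 0;
}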
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4fd733ff72b1..0f093f14d348 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -54,8 +54,7 @@
* We handle AEN commands ourselves and don't even let the
* block layer know about them.
*/
-#define NVME_NR_AEN_COMMANDS 1
-#define NVME_AQ_BLKMQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
+#define NVME_AQ_BLKMQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AERS)
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);
@@ -92,9 +91,7 @@ struct nvme_dev {
struct msix_entry *entry;
void __iomem *bar;
struct work_struct reset_work;
- struct work_struct scan_work;
struct work_struct remove_work;
- struct work_struct async_work;
struct timer_list watchdog_timer;
struct mutex shutdown_lock;
bool subsystem;
@@ -102,11 +99,6 @@ struct nvme_dev {
dma_addr_t cmb_dma_addr;
u64 cmb_size;
u32 cmbsz;
- unsigned long flags;
-
-#define NVME_CTRL_RESETTING 0
-#define NVME_CTRL_REMOVING 1
-
struct nvme_ctrl ctrl;
struct completion ioq_wait;
};
@@ -271,40 +263,6 @@ static int nvme_init_request(void *data, struct request *req,
return 0;
}
-static void nvme_queue_scan(struct nvme_dev *dev)
-{
- /*
- * Do not queue new scan work when a controller is reset during
- * removal.
- */
- if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
- return;
- queue_work(nvme_workq, &dev->scan_work);
-}
-
-static void nvme_complete_async_event(struct nvme_dev *dev,
- struct nvme_completion *cqe)
-{
- u16 status = le16_to_cpu(cqe->status) >> 1;
- u32 result = le32_to_cpu(cqe->result);
-
- if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
- ++dev->ctrl.event_limit;
- queue_work(nvme_workq, &dev->async_work);
- }
-
- if (status != NVME_SC_SUCCESS)
- return;
-
- switch (result & 0xff07) {
- case NVME_AER_NOTICE_NS_CHANGED:
- dev_info(dev->ctrl.device, "rescanning\n");
- nvme_queue_scan(dev);
- default:
- dev_warn(dev->ctrl.device, "async event result %08x\n", result);
- }
-}
-
/**
* __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
* @nvmeq: The queue to use
@@ -334,16 +292,11 @@ static __le64 **iod_list(struct request *req)
return (__le64 **)(iod->sg + req->nr_phys_segments);
}
-static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
+static int nvme_init_iod(struct request *rq, unsigned size,
+ struct nvme_dev *dev)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
int nseg = rq->nr_phys_segments;
- unsigned size;
-
- if (rq->cmd_flags & REQ_DISCARD)
- size = sizeof(struct nvme_dsm_range);
- else
- size = blk_rq_bytes(rq);
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -368,6 +321,8 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
__le64 **list = iod_list(req);
dma_addr_t prp_dma = iod->first_dma;
+ nvme_cleanup_cmd(req);
+
if (iod->npages == 0)
dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
for (i = 0; i < iod->npages; i++) {
@@ -529,7 +484,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
}
static int nvme_map_data(struct nvme_dev *dev, struct request *req,
- struct nvme_command *cmnd)
+ unsigned size, struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct request_queue *q = req->q;
@@ -546,7 +501,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
if (!dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir))
goto out;
- if (!nvme_setup_prps(dev, req, blk_rq_bytes(req)))
+ if (!nvme_setup_prps(dev, req, size))
goto out_unmap;
ret = BLK_MQ_RQ_QUEUE_ERROR;
@@ -596,37 +551,6 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
}
/*
- * We reuse the small pool to allocate the 16-byte range here as it is not
- * worth having a special pool for these or additional cases to handle freeing
- * the iod.
- */
-static int nvme_setup_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
- struct request *req, struct nvme_command *cmnd)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_dsm_range *range;
-
- range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
- &iod->first_dma);
- if (!range)
- return BLK_MQ_RQ_QUEUE_BUSY;
- iod_list(req)[0] = (__le64 *)range;
- iod->npages = 0;
-
- range->cattr = cpu_to_le32(0);
- range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
- range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
-
- memset(cmnd, 0, sizeof(*cmnd));
- cmnd->dsm.opcode = nvme_cmd_dsm;
- cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
- cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
- cmnd->dsm.nr = 0;
- cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
- return BLK_MQ_RQ_QUEUE_OK;
-}
-
-/*
* NOTE: ns is NULL when called on the admin queue.
*/
static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -637,6 +561,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_dev *dev = nvmeq->dev;
struct request *req = bd->rq;
struct nvme_command cmnd;
+ unsigned map_len;
int ret = BLK_MQ_RQ_QUEUE_OK;
/*
@@ -652,23 +577,17 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
}
}
- ret = nvme_init_iod(req, dev);
+ map_len = nvme_map_len(req);
+ ret = nvme_init_iod(req, map_len, dev);
if (ret)
return ret;
- if (req->cmd_flags & REQ_DISCARD) {
- ret = nvme_setup_discard(nvmeq, ns, req, &cmnd);
- } else {
- if (req->cmd_type == REQ_TYPE_DRV_PRIV)
- memcpy(&cmnd, req->cmd, sizeof(cmnd));
- else if (req->cmd_flags & REQ_FLUSH)
- nvme_setup_flush(ns, &cmnd);
- else
- nvme_setup_rw(ns, req, &cmnd);
+ ret = nvme_setup_cmd(ns, req, &cmnd);
+ if (ret)
+ goto out;
- if (req->nr_phys_segments)
- ret = nvme_map_data(dev, req, &cmnd);
- }
+ if (req->nr_phys_segments)
+ ret = nvme_map_data(dev, req, map_len, &cmnd);
if (ret)
goto out;
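
With command construction moved into the core, the queue_rq path above reduces to: compute the mapped length with nvme_map_len(), size the iod, let nvme_setup_cmd() build the command, and map data only if the request has segments. A condensed restatement of that ordering follows; it is a sketch of the flow, not a drop-in replacement for nvme_queue_rq(), and the error labels are trimmed:

/* Sketch: submission order after the refactor (error paths trimmed). */
static int example_queue_rq(struct nvme_dev *dev, struct nvme_ns *ns,
			    struct request *req)
{
	struct nvme_command cmnd;
	unsigned int map_len = nvme_map_len(req);
	int ret;

	ret = nvme_init_iod(req, map_len, dev);
	if (ret)
		return ret;

	ret = nvme_setup_cmd(ns, req, &cmnd);
	if (!ret && req->nr_phys_segments)
		ret = nvme_map_data(dev, req, map_len, &cmnd);

	return ret;	/* on success the caller submits &cmnd to the queue */
}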
@@ -764,7 +683,7 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
*/
if (unlikely(nvmeq->qid == 0 &&
cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
- nvme_complete_async_event(nvmeq->dev, &cqe);
+ nvme_complete_async_event(&nvmeq->dev->ctrl, &cqe);
continue;
}
@@ -833,21 +752,18 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
return 0;
}
-static void nvme_async_event_work(struct work_struct *work)
+static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
{
- struct nvme_dev *dev = container_of(work, struct nvme_dev, async_work);
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
struct nvme_queue *nvmeq = dev->queues[0];
struct nvme_command c;
memset(&c, 0, sizeof(c));
c.common.opcode = nvme_admin_async_event;
+ c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx;
spin_lock_irq(&nvmeq->q_lock);
- while (dev->ctrl.event_limit > 0) {
- c.common.command_id = NVME_AQ_BLKMQ_DEPTH +
- --dev->ctrl.event_limit;
- __nvme_submit_cmd(nvmeq, &c);
- }
+ __nvme_submit_cmd(nvmeq, &c);
spin_unlock_irq(&nvmeq->q_lock);
}
@@ -939,7 +855,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
* cancellation error. All outstanding requests are completed on
* shutdown, so we return BLK_EH_HANDLED.
*/
- if (test_bit(NVME_CTRL_RESETTING, &dev->flags)) {
+ if (dev->ctrl.state == NVME_CTRL_RESETTING) {
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
@@ -1003,16 +919,15 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
return BLK_EH_RESET_TIMER;
}
-static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
+static void nvme_cancel_io(struct request *req, void *data, bool reserved)
{
- struct nvme_queue *nvmeq = data;
int status;
if (!blk_mq_request_started(req))
return;
- dev_dbg_ratelimited(nvmeq->dev->ctrl.device,
- "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
+ dev_dbg_ratelimited(((struct nvme_dev *) data)->ctrl.device,
+ "Cancelling I/O %d", req->tag);
status = NVME_SC_ABORT_REQ;
if (blk_queue_dying(req->q))
@@ -1069,14 +984,6 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
return 0;
}
-static void nvme_clear_queue(struct nvme_queue *nvmeq)
-{
- spin_lock_irq(&nvmeq->q_lock);
- if (nvmeq->tags && *nvmeq->tags)
- blk_mq_all_tag_busy_iter(*nvmeq->tags, nvme_cancel_queue_ios, nvmeq);
- spin_unlock_irq(&nvmeq->q_lock);
-}
-
static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
struct nvme_queue *nvmeq = dev->queues[0];
@@ -1350,22 +1257,44 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
return result;
}
+static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
+{
+	/*
+	 * If true, this indicates loss of adapter communication, possibly by
+	 * an NVMe Subsystem reset.
+	 */
+ bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
+
+ /* If there is a reset ongoing, we shouldn't reset again. */
+ if (work_busy(&dev->reset_work))
+ return false;
+
+	/*
+	 * We shouldn't reset unless the controller is in a fatal error state
+	 * _or_ we have lost communication with it.
+ */
+ if (!(csts & NVME_CSTS_CFS) && !nssro)
+ return false;
+
+	/*
+	 * If the PCI error recovery process is in progress, we cannot reset,
+	 * or the recovery mechanism will surely fail.
+ */
+ if (pci_channel_offline(to_pci_dev(dev->dev)))
+ return false;
+
+ return true;
+}
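
The helper above gathers the watchdog's reset policy into a single predicate: reset only when CSTS reports a fatal error (CFS) or an NVM Subsystem reset (NSSRO on a subsystem-capable controller), and never while a reset is already queued or PCI error recovery is under way. A standalone sketch of the same decision; the CSTS bit positions follow the NVMe specification but should be treated as assumptions here rather than the driver's definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CSTS_CFS	(1u << 1)	/* Controller Fatal Status */
#define CSTS_NSSRO	(1u << 4)	/* NVM Subsystem Reset Occurred */

/* Mirror of the policy: reset on CFS, or on NSSRO when the controller
 * supports subsystem resets, unless a reset is already running or PCI
 * error recovery is in progress. */
static bool should_reset(uint32_t csts, bool subsystem, bool reset_busy,
			 bool pci_offline)
{
	bool nssro = subsystem && (csts & CSTS_NSSRO);

	if (reset_busy)
		return false;
	if (!(csts & CSTS_CFS) && !nssro)
		return false;
	if (pci_offline)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", should_reset(CSTS_CFS, false, false, false));	/* 1 */
	printf("%d\n", should_reset(CSTS_NSSRO, false, false, false));	/* 0 */
	return 0;
}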
+
static void nvme_watchdog_timer(unsigned long data)
{
struct nvme_dev *dev = (struct nvme_dev *)data;
u32 csts = readl(dev->bar + NVME_REG_CSTS);
- /*
- * Skip controllers currently under reset.
- */
- if (!work_pending(&dev->reset_work) && !work_busy(&dev->reset_work) &&
- ((csts & NVME_CSTS_CFS) ||
- (dev->subsystem && (csts & NVME_CSTS_NSSRO)))) {
- if (queue_work(nvme_workq, &dev->reset_work)) {
+ /* Reset the controller only when nvme_should_reset() deems it necessary. */
+ if (nvme_should_reset(dev, csts)) {
+ if (queue_work(nvme_workq, &dev->reset_work))
dev_warn(dev->dev,
"Failed status: 0x%x, reset controller.\n",
csts);
- }
return;
}
@@ -1551,8 +1480,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
return result;
}
-static void nvme_set_irq_hints(struct nvme_dev *dev)
+static void nvme_pci_post_scan(struct nvme_ctrl *ctrl)
{
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
struct nvme_queue *nvmeq;
int i;
@@ -1567,16 +1497,6 @@ static void nvme_set_irq_hints(struct nvme_dev *dev)
}
}
-static void nvme_dev_scan(struct work_struct *work)
-{
- struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
-
- if (!dev->tagset.tags)
- return;
- nvme_scan_namespaces(&dev->ctrl);
- nvme_set_irq_hints(dev);
-}
-
static void nvme_del_queue_end(struct request *req, int error)
{
struct nvme_queue *nvmeq = req->end_io_data;
@@ -1592,7 +1512,13 @@ static void nvme_del_cq_end(struct request *req, int error)
if (!error) {
unsigned long flags;
- spin_lock_irqsave(&nvmeq->q_lock, flags);
+ /*
+ * We might be called with the AQ q_lock held
+ * and the I/O queue q_lock should always
+ * nest inside the AQ one.
+ */
+ spin_lock_irqsave_nested(&nvmeq->q_lock, flags,
+ SINGLE_DEPTH_NESTING);
nvme_process_cq(nvmeq);
spin_unlock_irqrestore(&nvmeq->q_lock, flags);
}
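
All nvme_queue q_locks are initialized in the same place and therefore share one lockdep class, so taking an I/O queue's lock while the admin queue's lock is held would normally trigger a false-positive deadlock report. spin_lock_irqsave_nested() with SINGLE_DEPTH_NESTING tells lockdep that this one-level nesting is intentional. A minimal sketch of the pattern, assuming the driver's struct nvme_queue with its q_lock member:

/*
 * Illustrative only, not part of the patch: two queues whose q_locks share
 * a lockdep class, with the inner lock always taken under the outer one.
 */
static void complete_io_queue_under_admin(struct nvme_queue *adminq,
					  struct nvme_queue *ioq)
{
	unsigned long flags;

	spin_lock_irq(&adminq->q_lock);
	/* Same lock class as adminq->q_lock, so annotate the nesting. */
	spin_lock_irqsave_nested(&ioq->q_lock, flags, SINGLE_DEPTH_NESTING);
	/* ... reap completions on the I/O queue ... */
	spin_unlock_irqrestore(&ioq->q_lock, flags);
	spin_unlock_irq(&adminq->q_lock);
}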
@@ -1684,7 +1610,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
nvme_free_queues(dev, dev->online_queues);
}
- nvme_queue_scan(dev);
return 0;
}
@@ -1797,8 +1722,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
}
nvme_pci_disable(dev);
- for (i = dev->queue_count - 1; i >= 0; i--)
- nvme_clear_queue(dev->queues[i]);
+ blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_io, dev);
+ blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_io, dev);
mutex_unlock(&dev->shutdown_lock);
}
@@ -1854,7 +1779,7 @@ static void nvme_reset_work(struct work_struct *work)
struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
int result = -ENODEV;
- if (WARN_ON(test_bit(NVME_CTRL_RESETTING, &dev->flags)))
+ if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
goto out;
/*
@@ -1864,11 +1789,9 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
- if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
goto out;
- set_bit(NVME_CTRL_RESETTING, &dev->flags);
-
result = nvme_pci_enable(dev);
if (result)
goto out;
@@ -1890,8 +1813,14 @@ static void nvme_reset_work(struct work_struct *work)
if (result)
goto out;
- dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
- queue_work(nvme_workq, &dev->async_work);
+ /*
+ * A controller that cannot execute I/O typically requires user
+ * intervention to correct. For such degraded controllers, the driver
+ * should not submit commands the user did not request, so skip
+ * registering for asynchronous event notification on this condition.
+ */
+ if (dev->online_queues > 1)
+ nvme_queue_async_events(&dev->ctrl);
mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
@@ -1901,13 +1830,20 @@ static void nvme_reset_work(struct work_struct *work)
*/
if (dev->online_queues < 2) {
dev_warn(dev->ctrl.device, "IO queues not created\n");
+ nvme_kill_queues(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
} else {
nvme_start_queues(&dev->ctrl);
nvme_dev_add(dev);
}
- clear_bit(NVME_CTRL_RESETTING, &dev->flags);
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
+ dev_warn(dev->ctrl.device, "failed to mark controller live\n");
+ goto out;
+ }
+
+ if (dev->online_queues > 1)
+ nvme_queue_scan(&dev->ctrl);
return;
out:
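
The reset path now relies on the core controller state machine (NVME_CTRL_RESETTING, NVME_CTRL_LIVE, NVME_CTRL_DELETING) instead of per-device flag bits, and nvme_change_ctrl_state() refuses transitions that are not allowed from the current state. A rough userspace sketch of that idea; the state names and the transition table below are illustrative assumptions, not the core's actual table:

#include <stdbool.h>

enum ctrl_state { CTRL_NEW, CTRL_LIVE, CTRL_RESETTING, CTRL_DELETING };

/* Accept a transition only from an allowed previous state. */
static bool change_state(enum ctrl_state *cur, enum ctrl_state next)
{
	bool ok;

	switch (next) {
	case CTRL_RESETTING:
		ok = (*cur == CTRL_NEW || *cur == CTRL_LIVE);
		break;
	case CTRL_LIVE:
		ok = (*cur == CTRL_RESETTING);
		break;
	case CTRL_DELETING:
		ok = true;		/* assumed always allowed here */
		break;
	default:
		ok = false;
		break;
	}

	if (ok)
		*cur = next;
	return ok;
}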
@@ -1955,13 +1891,6 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
return 0;
}
-static bool nvme_pci_io_incapable(struct nvme_ctrl *ctrl)
-{
- struct nvme_dev *dev = to_nvme_dev(ctrl);
-
- return !dev->bar || dev->online_queues < 2;
-}
-
static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
{
return nvme_reset(to_nvme_dev(ctrl));
@@ -1972,9 +1901,10 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
.reg_read64 = nvme_pci_reg_read64,
- .io_incapable = nvme_pci_io_incapable,
.reset_ctrl = nvme_pci_reset_ctrl,
.free_ctrl = nvme_pci_free_ctrl,
+ .post_scan = nvme_pci_post_scan,
+ .submit_async_event = nvme_pci_submit_async_event,
};
static int nvme_dev_map(struct nvme_dev *dev)
@@ -2026,10 +1956,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto free;
- INIT_WORK(&dev->scan_work, nvme_dev_scan);
INIT_WORK(&dev->reset_work, nvme_reset_work);
INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
- INIT_WORK(&dev->async_work, nvme_async_event_work);
setup_timer(&dev->watchdog_timer, nvme_watchdog_timer,
(unsigned long)dev);
mutex_init(&dev->shutdown_lock);
@@ -2086,15 +2014,12 @@ static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- set_bit(NVME_CTRL_REMOVING, &dev->flags);
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
+
pci_set_drvdata(pdev, NULL);
- flush_work(&dev->async_work);
flush_work(&dev->reset_work);
- flush_work(&dev->scan_work);
- nvme_remove_namespaces(&dev->ctrl);
nvme_uninit_ctrl(&dev->ctrl);
nvme_dev_disable(dev, true);
- flush_work(&dev->reset_work);
nvme_dev_remove_admin(dev);
nvme_free_queues(dev, 0);
nvme_release_cmb(dev);
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index e2a48415d969..b3bec3aaa45d 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -112,4 +112,7 @@ config OF_OVERLAY
While this option is selected automatically when needed, you can
enable it manually to improve device tree unit test coverage.
+config OF_NUMA
+ bool
+
endif # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 156c072b3117..bee3fa96b981 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -14,5 +14,6 @@ obj-$(CONFIG_OF_MTD) += of_mtd.o
obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
obj-$(CONFIG_OF_RESOLVE) += resolver.o
obj-$(CONFIG_OF_OVERLAY) += overlay.o
+obj-$(CONFIG_OF_NUMA) += of_numa.o
obj-$(CONFIG_OF_UNITTEST) += unittest-data/
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
new file mode 100644
index 000000000000..0f2784bc1874
--- /dev/null
+++ b/drivers/of/of_numa.c
@@ -0,0 +1,211 @@
+/*
+ * OF NUMA Parsing support.
+ *
+ * Copyright (C) 2015 - 2016 Cavium Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/nodemask.h>
+
+#include <asm/numa.h>
+
+/* default NUMA node is 0 */
+#define DEFAULT_NODE 0
+
+/*
+ * Even though we connect cpus to numa domains later in SMP
+ * init, we need to know the node ids now for all cpus.
+ */
+static void __init of_numa_parse_cpu_nodes(void)
+{
+ u32 nid;
+ int r;
+ struct device_node *cpus;
+ struct device_node *np = NULL;
+
+ cpus = of_find_node_by_path("/cpus");
+ if (!cpus)
+ return;
+
+ for_each_child_of_node(cpus, np) {
+ /* Skip things that are not CPUs */
+ if (of_node_cmp(np->type, "cpu") != 0)
+ continue;
+
+ r = of_property_read_u32(np, "numa-node-id", &nid);
+ if (r)
+ continue;
+
+ pr_debug("NUMA: CPU on %u\n", nid);
+ if (nid >= MAX_NUMNODES)
+ pr_warn("NUMA: Node id %u exceeds maximum value\n",
+ nid);
+ else
+ node_set(nid, numa_nodes_parsed);
+ }
+}
+
+static int __init of_numa_parse_memory_nodes(void)
+{
+ struct device_node *np = NULL;
+ struct resource rsrc;
+ u32 nid;
+ int r = 0;
+
+ for (;;) {
+ np = of_find_node_by_type(np, "memory");
+ if (!np)
+ break;
+
+ r = of_property_read_u32(np, "numa-node-id", &nid);
+ if (r == -EINVAL)
+ /*
+ * -EINVAL means the property doesn't exist; keep
+ * looking for more memory nodes with a
+ * "numa-node-id" property
+ */
+ continue;
+ else if (r)
+ /* some other error */
+ break;
+
+ r = of_address_to_resource(np, 0, &rsrc);
+ if (r) {
+ pr_err("NUMA: bad reg property in memory node\n");
+ break;
+ }
+
+ pr_debug("NUMA: base = %llx len = %llx, node = %u\n",
+ rsrc.start, rsrc.end - rsrc.start + 1, nid);
+
+ r = numa_add_memblk(nid, rsrc.start,
+ rsrc.end - rsrc.start + 1);
+ if (r)
+ break;
+ }
+ of_node_put(np);
+
+ return r;
+}
+
+static int __init of_numa_parse_distance_map_v1(struct device_node *map)
+{
+ const __be32 *matrix;
+ int entry_count;
+ int i;
+
+ pr_info("NUMA: parsing numa-distance-map-v1\n");
+
+ matrix = of_get_property(map, "distance-matrix", NULL);
+ if (!matrix) {
+ pr_err("NUMA: No distance-matrix property in distance-map\n");
+ return -EINVAL;
+ }
+
+ entry_count = of_property_count_u32_elems(map, "distance-matrix");
+ if (entry_count <= 0) {
+ pr_err("NUMA: Invalid distance-matrix\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i + 2 < entry_count; i += 3) {
+ u32 nodea, nodeb, distance;
+
+ nodea = of_read_number(matrix, 1);
+ matrix++;
+ nodeb = of_read_number(matrix, 1);
+ matrix++;
+ distance = of_read_number(matrix, 1);
+ matrix++;
+
+ numa_set_distance(nodea, nodeb, distance);
+ pr_debug("NUMA: distance[node%d -> node%d] = %d\n",
+ nodea, nodeb, distance);
+
+ /* Set default distance of node B->A same as A->B */
+ if (nodeb > nodea)
+ numa_set_distance(nodeb, nodea, distance);
+ }
+
+ return 0;
+}
+
+static int __init of_numa_parse_distance_map(void)
+{
+ int ret = 0;
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL,
+ "numa-distance-map-v1");
+ if (np)
+ ret = of_numa_parse_distance_map_v1(np);
+
+ of_node_put(np);
+ return ret;
+}
+
+int of_node_to_nid(struct device_node *device)
+{
+ struct device_node *np;
+ u32 nid;
+ int r = -ENODATA;
+
+ np = of_node_get(device);
+
+ while (np) {
+ struct device_node *parent;
+
+ r = of_property_read_u32(np, "numa-node-id", &nid);
+ /*
+ * -EINVAL indicates the property was not found, and
+ * we walk up the tree trying to find a parent with a
+ * "numa-node-id". Any other type of error indicates
+ * a bad device tree and we give up.
+ */
+ if (r != -EINVAL)
+ break;
+
+ parent = of_get_parent(np);
+ of_node_put(np);
+ np = parent;
+ }
+ if (np && r)
+ pr_warn("NUMA: Invalid \"numa-node-id\" property in node %s\n",
+ np->name);
+ of_node_put(np);
+
+ if (!r) {
+ if (nid >= MAX_NUMNODES)
+ pr_warn("NUMA: Node id %u exceeds maximum value\n",
+ nid);
+ else
+ return nid;
+ }
+
+ return NUMA_NO_NODE;
+}
+EXPORT_SYMBOL(of_node_to_nid);
+
+int __init of_numa_init(void)
+{
+ int r;
+
+ of_numa_parse_cpu_nodes();
+ r = of_numa_parse_memory_nodes();
+ if (r)
+ return r;
+ return of_numa_parse_distance_map();
+}
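
of_numa_parse_distance_map_v1() above walks the distance-matrix property as flat (nodeA, nodeB, distance) triplets of big-endian cells and, when only one direction is listed, defaults the reverse direction to the same value. A standalone userspace sketch of that walk, with ntohl() standing in for of_read_number():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

static void parse_matrix(const uint32_t *matrix_be, int entry_count)
{
	int i;

	for (i = 0; i + 2 < entry_count; i += 3) {
		uint32_t a = ntohl(matrix_be[i]);
		uint32_t b = ntohl(matrix_be[i + 1]);
		uint32_t d = ntohl(matrix_be[i + 2]);

		printf("distance[node%u -> node%u] = %u\n", a, b, d);
		/* default the reverse direction if it wasn't given first */
		if (b > a)
			printf("distance[node%u -> node%u] = %u (default)\n",
			       b, a, d);
	}
}

int main(void)
{
	/* 2x2 matrix: 10 on the diagonal, 20 across nodes */
	const uint32_t m[] = { htonl(0), htonl(0), htonl(10),
			       htonl(0), htonl(1), htonl(20),
			       htonl(1), htonl(0), htonl(20),
			       htonl(1), htonl(1), htonl(10) };

	parse_matrix(m, 12);
	return 0;
}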
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index f70090897fdf..f2d01d4d9364 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -847,6 +847,14 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
if (!platform_get_irq(cpu_pmu->plat_device, 0))
cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ /*
+ * This is a CPU PMU potentially in a heterogeneous configuration (e.g.
+ * big.LITTLE). This is not an uncore PMU, and we have taken ctx
+ * sharing into account (e.g. with our pmu::filter_match callback and
+ * pmu::event_init group validation).
+ */
+ cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;
+
return 0;
out_unregister:
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 1062fa42ff26..79d64ea00bfb 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -793,15 +793,6 @@ static acpi_status __init AMW0_find_mailled(void)
return AE_OK;
}
-static int AMW0_set_cap_acpi_check_device_found __initdata;
-
-static acpi_status __init AMW0_set_cap_acpi_check_device_cb(acpi_handle handle,
- u32 level, void *context, void **retval)
-{
- AMW0_set_cap_acpi_check_device_found = 1;
- return AE_OK;
-}
-
static const struct acpi_device_id norfkill_ids[] __initconst = {
{ "VPC2004", 0},
{ "IBM0068", 0},
@@ -816,9 +807,10 @@ static int __init AMW0_set_cap_acpi_check_device(void)
const struct acpi_device_id *id;
for (id = norfkill_ids; id->id[0]; id++)
- acpi_get_devices(id->id, AMW0_set_cap_acpi_check_device_cb,
- NULL, NULL);
- return AMW0_set_cap_acpi_check_device_found;
+ if (acpi_dev_found(id->id))
+ return true;
+
+ return false;
}
static acpi_status __init AMW0_set_capabilities(void)
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 14fd2ecb06a1..17b365f26f9d 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -204,30 +204,10 @@ static void eeepc_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
}
}
-static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level,
- void *context, void **retval)
-{
- pr_warn("Found legacy ATKD device (%s)\n", EEEPC_ACPI_HID);
- *(bool *)context = true;
- return AE_CTRL_TERMINATE;
-}
-
-static int eeepc_wmi_check_atkd(void)
-{
- acpi_status status;
- bool found = false;
-
- status = acpi_get_devices(EEEPC_ACPI_HID, eeepc_wmi_parse_device,
- &found, NULL);
-
- if (ACPI_FAILURE(status) || !found)
- return 0;
- return -1;
-}
-
static int eeepc_wmi_probe(struct platform_device *pdev)
{
- if (eeepc_wmi_check_atkd()) {
+ if (acpi_dev_found(EEEPC_ACPI_HID)) {
+ pr_warn("Found legacy ATKD device (%s)\n", EEEPC_ACPI_HID);
pr_warn("WMI device present, but legacy ATKD device is also "
"present and enabled\n");
pr_warn("You probably booted with acpi_osi=\"Linux\" or "
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index facd43b8516c..81603d99082b 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -521,10 +521,11 @@ static int __init pnpbios_init(void)
int ret;
if (pnpbios_disabled || dmi_check_system(pnpbios_dmi_table) ||
- paravirt_enabled()) {
+ arch_pnpbios_disabled()) {
printk(KERN_INFO "PnPBIOS: Disabled\n");
return -ENODEV;
}
+
#ifdef CONFIG_PNPACPI
if (!acpi_disabled && !pnpacpi_disabled) {
pnpbios_disabled = 1;
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 8986382718dd..01b6d3f9b8fb 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -336,6 +336,7 @@ static int rockchip_iodomain_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match;
struct rockchip_iodomain *iod;
+ struct device *parent;
int i, ret = 0;
if (!np)
@@ -351,7 +352,14 @@ static int rockchip_iodomain_probe(struct platform_device *pdev)
match = of_match_node(rockchip_iodomain_match, np);
iod->soc_data = (struct rockchip_iodomain_soc_data *)match->data;
- iod->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ parent = pdev->dev.parent;
+ if (parent && parent->of_node) {
+ iod->grf = syscon_node_to_regmap(parent->of_node);
+ } else {
+ dev_dbg(&pdev->dev, "falling back to old binding\n");
+ iod->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ }
+
if (IS_ERR(iod->grf)) {
dev_err(&pdev->dev, "couldn't find grf regmap\n");
return PTR_ERR(iod->grf);
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 8fad0a7044d3..b2766b867b0e 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -34,6 +34,9 @@
#include <asm/processor.h>
#include <asm/cpu_device_id.h>
+/* Local defines */
+#define MSR_PLATFORM_POWER_LIMIT 0x0000065C
+
/* bitmasks for RAPL MSRs, used by primitive access functions */
#define ENERGY_STATUS_MASK 0xffffffff
@@ -86,6 +89,7 @@ enum rapl_domain_type {
RAPL_DOMAIN_PP0, /* core power plane */
RAPL_DOMAIN_PP1, /* graphics uncore */
RAPL_DOMAIN_DRAM,/* DRAM control_type */
+ RAPL_DOMAIN_PLATFORM, /* PSys control_type */
RAPL_DOMAIN_MAX,
};
@@ -251,9 +255,11 @@ static const char * const rapl_domain_names[] = {
"core",
"uncore",
"dram",
+ "psys",
};
static struct powercap_control_type *control_type; /* PowerCap Controller */
+static struct rapl_domain *platform_rapl_domain; /* Platform (PSys) domain */
/* caller to ensure CPU hotplug lock is held */
static struct rapl_package *find_package_by_id(int id)
@@ -409,6 +415,14 @@ static const struct powercap_zone_ops zone_ops[] = {
.set_enable = set_domain_enable,
.get_enable = get_domain_enable,
},
+ /* RAPL_DOMAIN_PLATFORM */
+ {
+ .get_energy_uj = get_energy_counter,
+ .get_max_energy_range_uj = get_max_energy_counter,
+ .release = release_zone,
+ .set_enable = set_domain_enable,
+ .get_enable = get_domain_enable,
+ },
};
static int set_power_limit(struct powercap_zone *power_zone, int id,
@@ -1101,6 +1115,8 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
RAPL_CPU(0X5C, rapl_defaults_core),/* Broxton */
RAPL_CPU(0x5E, rapl_defaults_core),/* Skylake-H/S */
RAPL_CPU(0x57, rapl_defaults_hsw_server),/* Knights Landing */
+ RAPL_CPU(0x8E, rapl_defaults_core),/* Kabylake */
+ RAPL_CPU(0x9E, rapl_defaults_core),/* Kabylake */
{}
};
MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
@@ -1160,6 +1176,13 @@ static int rapl_unregister_powercap(void)
powercap_unregister_zone(control_type,
&rd_package->power_zone);
}
+
+ if (platform_rapl_domain) {
+ powercap_unregister_zone(control_type,
+ &platform_rapl_domain->power_zone);
+ kfree(platform_rapl_domain);
+ }
+
powercap_unregister_control_type(control_type);
return 0;
@@ -1239,6 +1262,47 @@ err_cleanup:
return ret;
}
+static int rapl_register_psys(void)
+{
+ struct rapl_domain *rd;
+ struct powercap_zone *power_zone;
+ u64 val;
+
+ if (rdmsrl_safe_on_cpu(0, MSR_PLATFORM_ENERGY_STATUS, &val) || !val)
+ return -ENODEV;
+
+ if (rdmsrl_safe_on_cpu(0, MSR_PLATFORM_POWER_LIMIT, &val) || !val)
+ return -ENODEV;
+
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+
+ rd->name = rapl_domain_names[RAPL_DOMAIN_PLATFORM];
+ rd->id = RAPL_DOMAIN_PLATFORM;
+ rd->msrs[0] = MSR_PLATFORM_POWER_LIMIT;
+ rd->msrs[1] = MSR_PLATFORM_ENERGY_STATUS;
+ rd->rpl[0].prim_id = PL1_ENABLE;
+ rd->rpl[0].name = pl1_name;
+ rd->rpl[1].prim_id = PL2_ENABLE;
+ rd->rpl[1].name = pl2_name;
+ rd->rp = find_package_by_id(0);
+
+ power_zone = powercap_register_zone(&rd->power_zone, control_type,
+ "psys", NULL,
+ &zone_ops[RAPL_DOMAIN_PLATFORM],
+ 2, &constraint_ops);
+
+ if (IS_ERR(power_zone)) {
+ kfree(rd);
+ return PTR_ERR(power_zone);
+ }
+
+ platform_rapl_domain = rd;
+
+ return 0;
+}
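
rapl_register_psys() registers the platform (PSys) zone only if both backing MSRs can be read on CPU 0 and return non-zero, so unsupported parts skip the zone silently. A sketch of the same probe-before-register guard; register_foo() is a hypothetical stand-in for the real registration step:

/* register_foo() is a hypothetical placeholder for the registration. */
static int register_if_msrs_present(u32 status_msr, u32 limit_msr)
{
	u64 val;

	if (rdmsrl_safe_on_cpu(0, status_msr, &val) || !val)
		return -ENODEV;
	if (rdmsrl_safe_on_cpu(0, limit_msr, &val) || !val)
		return -ENODEV;

	return register_foo();
}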
+
static int rapl_register_powercap(void)
{
struct rapl_domain *rd;
@@ -1255,6 +1319,10 @@ static int rapl_register_powercap(void)
list_for_each_entry(rp, &rapl_packages, plist)
if (rapl_package_register_powercap(rp))
goto err_cleanup_package;
+
+ /* Don't bail out if PSys is not supported */
+ rapl_register_psys();
+
return ret;
err_cleanup_package:
@@ -1289,6 +1357,9 @@ static int rapl_check_domain(int cpu, int domain)
case RAPL_DOMAIN_DRAM:
msr = MSR_DRAM_ENERGY_STATUS;
break;
+ case RAPL_DOMAIN_PLATFORM:
+ /* PSYS(PLATFORM) is not a CPU domain, so avoid printing an error */
+ return -EINVAL;
default:
pr_err("invalid domain id %d\n", domain);
return -EINVAL;
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 7831bc6b51dd..680fbc795a0a 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -128,6 +128,13 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
set_bit(PWMF_REQUESTED, &pwm->flags);
pwm->label = label;
+ /*
+ * FIXME: This should be removed once all PWM users properly make use
+ * of struct pwm_args to initialize the PWM device. As long as this is
+ * here, the PWM state and hardware state can get out of sync.
+ */
+ pwm_apply_args(pwm);
+
return 0;
}
@@ -146,12 +153,12 @@ of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args)
if (IS_ERR(pwm))
return pwm;
- pwm_set_period(pwm, args->args[1]);
+ pwm->args.period = args->args[1];
if (args->args[2] & PWM_POLARITY_INVERTED)
- pwm_set_polarity(pwm, PWM_POLARITY_INVERSED);
+ pwm->args.polarity = PWM_POLARITY_INVERSED;
else
- pwm_set_polarity(pwm, PWM_POLARITY_NORMAL);
+ pwm->args.polarity = PWM_POLARITY_NORMAL;
return pwm;
}
@@ -172,7 +179,7 @@ of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
if (IS_ERR(pwm))
return pwm;
- pwm_set_period(pwm, args->args[1]);
+ pwm->args.period = args->args[1];
return pwm;
}
@@ -747,13 +754,13 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
if (!chip)
goto out;
+ pwm->args.period = chosen->period;
+ pwm->args.polarity = chosen->polarity;
+
pwm = pwm_request_from_chip(chip, chosen->index, con_id ?: dev_id);
if (IS_ERR(pwm))
goto out;
- pwm_set_period(pwm, chosen->period);
- pwm_set_polarity(pwm, chosen->polarity);
-
out:
mutex_unlock(&pwm_lookup_lock);
return pwm;
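
The PWM core now keeps the board- or DT-supplied period and polarity in pwm->args rather than writing them into the live PWM state, and pwm_apply_args() in pwm_device_request() preserves the old behaviour until consumers are converted. A consumer-side sketch, assuming the pwm_get_args() helper that accompanies struct pwm_args:

#include <linux/err.h>
#include <linux/pwm.h>

/* Hypothetical consumer: derive the duty cycle from the reference args. */
static int example_pwm_setup(struct device *dev)
{
	struct pwm_device *pwm;
	struct pwm_args args;
	int ret;

	pwm = devm_pwm_get(dev, NULL);
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	pwm_get_args(pwm, &args);

	/* 50% duty cycle relative to the DT/board-supplied period */
	ret = pwm_config(pwm, args.period / 2, args.period);
	if (ret)
		return ret;

	return pwm_enable(pwm);
}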
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index a80c10803636..7d335422cfda 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -60,7 +60,7 @@ static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
return -EINVAL;
/* Store constant period value */
- pwm_set_period(pwm, DIV_ROUND_CLOSEST(NSEC_PER_SEC, freq));
+ pwm->args.period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, freq);
return 0;
}
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index cb2f7024cf68..58b709f29130 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -160,7 +160,7 @@ pxa_pwm_of_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
if (IS_ERR(pwm))
return pwm;
- pwm_set_period(pwm, args->args[0]);
+ pwm->args.period = args->args[0];
return pwm;
}
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index c77dc08b1202..144cbf5b3e5a 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -321,6 +321,15 @@ config REGULATOR_LP872X
help
This driver supports LP8720/LP8725 PMIC
+config REGULATOR_LP873X
+ tristate "TI LP873X Power regulators"
+ depends on MFD_LP873X && OF
+ help
+ This driver supports LP873X voltage regulator chips. The LP873X
+ provides two step-down converters and two general-purpose LDO
+ voltage regulators. It supports software-based voltage control
+ for different voltage domains.
+
config REGULATOR_LP8755
tristate "TI LP8755 High Performance PMU driver"
depends on I2C
@@ -409,6 +418,7 @@ config REGULATOR_MAX8952
config REGULATOR_MAX8973
tristate "Maxim MAX8973 voltage regulator "
depends on I2C
+ depends on THERMAL && THERMAL_OF
select REGMAP_I2C
help
The MAXIM MAX8973 high-efficiency, three-phase, DC-DC step-down
@@ -548,6 +558,13 @@ config REGULATOR_PV88060
Say y here to support the voltage regulators and convertors
PV88060
+config REGULATOR_PV88080
+ tristate "Powerventure Semiconductor PV88080 regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say y here to support the buck converters on PV88080
+
config REGULATOR_PV88090
tristate "Powerventure Semiconductor PV88090 regulator"
depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 61bfbb9d4a0c..85a1d44a3939 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -42,11 +42,12 @@ obj-$(CONFIG_REGULATOR_LM363X) += lm363x-regulator.o
obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
obj-$(CONFIG_REGULATOR_LP3972) += lp3972.o
obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o
+obj-$(CONFIG_REGULATOR_LP873X) += lp873x-regulator.o
obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o
obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o
obj-$(CONFIG_REGULATOR_LP8755) += lp8755.o
obj-$(CONFIG_REGULATOR_LTC3589) += ltc3589.o
-obj-$(CONFIG_REGULATOR_MAX14577) += max14577.o
+obj-$(CONFIG_REGULATOR_MAX14577) += max14577-regulator.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
@@ -55,10 +56,10 @@ obj-$(CONFIG_REGULATOR_MAX8907) += max8907-regulator.o
obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o
obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o
obj-$(CONFIG_REGULATOR_MAX8973) += max8973-regulator.o
-obj-$(CONFIG_REGULATOR_MAX8997) += max8997.o
+obj-$(CONFIG_REGULATOR_MAX8997) += max8997-regulator.o
obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
obj-$(CONFIG_REGULATOR_MAX77686) += max77686-regulator.o
-obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o
+obj-$(CONFIG_REGULATOR_MAX77693) += max77693-regulator.o
obj-$(CONFIG_REGULATOR_MAX77802) += max77802-regulator.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
@@ -71,6 +72,7 @@ obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
obj-$(CONFIG_REGULATOR_PV88060) += pv88060-regulator.o
+obj-$(CONFIG_REGULATOR_PV88080) += pv88080-regulator.o
obj-$(CONFIG_REGULATOR_PV88090) += pv88090-regulator.o
obj-$(CONFIG_REGULATOR_PWM) += pwm-regulator.o
obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index 000d566e32a4..a1cd0d4f8257 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -139,6 +139,74 @@ struct act8865 {
int off_mask;
};
+static const struct regmap_range act8600_reg_ranges[] = {
+ regmap_reg_range(0x00, 0x01),
+ regmap_reg_range(0x10, 0x10),
+ regmap_reg_range(0x12, 0x12),
+ regmap_reg_range(0x20, 0x20),
+ regmap_reg_range(0x22, 0x22),
+ regmap_reg_range(0x30, 0x30),
+ regmap_reg_range(0x32, 0x32),
+ regmap_reg_range(0x40, 0x41),
+ regmap_reg_range(0x50, 0x51),
+ regmap_reg_range(0x60, 0x61),
+ regmap_reg_range(0x70, 0x71),
+ regmap_reg_range(0x80, 0x81),
+ regmap_reg_range(0x91, 0x91),
+ regmap_reg_range(0xA1, 0xA1),
+ regmap_reg_range(0xA8, 0xAA),
+ regmap_reg_range(0xB0, 0xB0),
+ regmap_reg_range(0xB2, 0xB2),
+ regmap_reg_range(0xC1, 0xC1),
+};
+
+static const struct regmap_range act8600_reg_ro_ranges[] = {
+ regmap_reg_range(0xAA, 0xAA),
+ regmap_reg_range(0xC1, 0xC1),
+};
+
+static const struct regmap_range act8600_reg_volatile_ranges[] = {
+ regmap_reg_range(0x00, 0x01),
+ regmap_reg_range(0x12, 0x12),
+ regmap_reg_range(0x22, 0x22),
+ regmap_reg_range(0x32, 0x32),
+ regmap_reg_range(0x41, 0x41),
+ regmap_reg_range(0x51, 0x51),
+ regmap_reg_range(0x61, 0x61),
+ regmap_reg_range(0x71, 0x71),
+ regmap_reg_range(0x81, 0x81),
+ regmap_reg_range(0xA8, 0xA8),
+ regmap_reg_range(0xAA, 0xAA),
+ regmap_reg_range(0xB0, 0xB0),
+ regmap_reg_range(0xC1, 0xC1),
+};
+
+static const struct regmap_access_table act8600_write_ranges_table = {
+ .yes_ranges = act8600_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(act8600_reg_ranges),
+ .no_ranges = act8600_reg_ro_ranges,
+ .n_no_ranges = ARRAY_SIZE(act8600_reg_ro_ranges),
+};
+
+static const struct regmap_access_table act8600_read_ranges_table = {
+ .yes_ranges = act8600_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(act8600_reg_ranges),
+};
+
+static const struct regmap_access_table act8600_volatile_ranges_table = {
+ .yes_ranges = act8600_reg_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(act8600_reg_volatile_ranges),
+};
+
+static const struct regmap_config act8600_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xFF,
+ .wr_table = &act8600_write_ranges_table,
+ .rd_table = &act8600_read_ranges_table,
+ .volatile_table = &act8600_volatile_ranges_table,
+};
+
static const struct regmap_config act8865_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -319,7 +387,6 @@ static struct of_regulator_match act8600_matches[] = {
};
static int act8865_pdata_from_dt(struct device *dev,
- struct device_node **of_node,
struct act8865_platform_data *pdata,
unsigned long type)
{
@@ -370,7 +437,7 @@ static int act8865_pdata_from_dt(struct device *dev,
regulator->id = i;
regulator->name = matches[i].name;
regulator->init_data = matches[i].init_data;
- of_node[i] = matches[i].of_node;
+ regulator->of_node = matches[i].of_node;
regulator++;
}
@@ -378,7 +445,6 @@ static int act8865_pdata_from_dt(struct device *dev,
}
#else
static inline int act8865_pdata_from_dt(struct device *dev,
- struct device_node **of_node,
struct act8865_platform_data *pdata,
unsigned long type)
{
@@ -386,8 +452,8 @@ static inline int act8865_pdata_from_dt(struct device *dev,
}
#endif
-static struct regulator_init_data
-*act8865_get_init_data(int id, struct act8865_platform_data *pdata)
+static struct act8865_regulator_data *act8865_get_regulator_data(
+ int id, struct act8865_platform_data *pdata)
{
int i;
@@ -396,7 +462,7 @@ static struct regulator_init_data
for (i = 0; i < pdata->num_regulators; i++) {
if (pdata->regulators[i].id == id)
- return pdata->regulators[i].init_data;
+ return &pdata->regulators[i];
}
return NULL;
@@ -418,9 +484,9 @@ static int act8865_pmic_probe(struct i2c_client *client,
const struct regulator_desc *regulators;
struct act8865_platform_data pdata_of, *pdata;
struct device *dev = &client->dev;
- struct device_node **of_node;
int i, ret, num_regulators;
struct act8865 *act8865;
+ const struct regmap_config *regmap_config;
unsigned long type;
int off_reg, off_mask;
int voltage_select = 0;
@@ -447,12 +513,14 @@ static int act8865_pmic_probe(struct i2c_client *client,
case ACT8600:
regulators = act8600_regulators;
num_regulators = ARRAY_SIZE(act8600_regulators);
+ regmap_config = &act8600_regmap_config;
off_reg = -1;
off_mask = -1;
break;
case ACT8846:
regulators = act8846_regulators;
num_regulators = ARRAY_SIZE(act8846_regulators);
+ regmap_config = &act8865_regmap_config;
off_reg = ACT8846_GLB_OFF_CTRL;
off_mask = ACT8846_OFF_SYSMASK;
break;
@@ -464,6 +532,7 @@ static int act8865_pmic_probe(struct i2c_client *client,
regulators = act8865_regulators;
num_regulators = ARRAY_SIZE(act8865_regulators);
}
+ regmap_config = &act8865_regmap_config;
off_reg = ACT8865_SYS_CTRL;
off_mask = ACT8865_MSTROFF;
break;
@@ -472,34 +541,22 @@ static int act8865_pmic_probe(struct i2c_client *client,
return -EINVAL;
}
- of_node = devm_kzalloc(dev, sizeof(struct device_node *) *
- num_regulators, GFP_KERNEL);
- if (!of_node)
- return -ENOMEM;
-
if (dev->of_node && !pdata) {
- ret = act8865_pdata_from_dt(dev, of_node, &pdata_of, type);
+ ret = act8865_pdata_from_dt(dev, &pdata_of, type);
if (ret < 0)
return ret;
pdata = &pdata_of;
}
- if (pdata->num_regulators > num_regulators) {
- dev_err(dev, "too many regulators: %d\n",
- pdata->num_regulators);
- return -EINVAL;
- }
-
act8865 = devm_kzalloc(dev, sizeof(struct act8865), GFP_KERNEL);
if (!act8865)
return -ENOMEM;
- act8865->regmap = devm_regmap_init_i2c(client, &act8865_regmap_config);
+ act8865->regmap = devm_regmap_init_i2c(client, regmap_config);
if (IS_ERR(act8865->regmap)) {
ret = PTR_ERR(act8865->regmap);
- dev_err(&client->dev, "Failed to allocate register map: %d\n",
- ret);
+ dev_err(dev, "Failed to allocate register map: %d\n", ret);
return ret;
}
@@ -518,15 +575,20 @@ static int act8865_pmic_probe(struct i2c_client *client,
for (i = 0; i < num_regulators; i++) {
const struct regulator_desc *desc = &regulators[i];
struct regulator_config config = { };
+ struct act8865_regulator_data *rdata;
struct regulator_dev *rdev;
config.dev = dev;
- config.init_data = act8865_get_init_data(desc->id, pdata);
- config.of_node = of_node[i];
config.driver_data = act8865;
config.regmap = act8865->regmap;
- rdev = devm_regulator_register(&client->dev, desc, &config);
+ rdata = act8865_get_regulator_data(desc->id, pdata);
+ if (rdata) {
+ config.init_data = rdata->init_data;
+ config.of_node = rdata->of_node;
+ }
+
+ rdev = devm_regulator_register(dev, desc, &config);
if (IS_ERR(rdev)) {
dev_err(dev, "failed to register %s\n", desc->name);
return PTR_ERR(rdev);
@@ -534,7 +596,6 @@ static int act8865_pmic_probe(struct i2c_client *client,
}
i2c_set_clientdata(client, act8865);
- devm_kfree(dev, of_node);
return 0;
}
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index 8b046eec6ae0..66337e12719b 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -372,7 +372,7 @@ static int as3722_ldo_set_current_limit(struct regulator_dev *rdev,
AS3722_LDO_ILIMIT_MASK, reg);
}
-static struct regulator_ops as3722_ldo0_ops = {
+static const struct regulator_ops as3722_ldo0_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -383,7 +383,7 @@ static struct regulator_ops as3722_ldo0_ops = {
.set_current_limit = as3722_ldo_set_current_limit,
};
-static struct regulator_ops as3722_ldo0_extcntrl_ops = {
+static const struct regulator_ops as3722_ldo0_extcntrl_ops = {
.list_voltage = regulator_list_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -415,7 +415,7 @@ static int as3722_ldo3_get_current_limit(struct regulator_dev *rdev)
return 150000;
}
-static struct regulator_ops as3722_ldo3_ops = {
+static const struct regulator_ops as3722_ldo3_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -425,20 +425,45 @@ static struct regulator_ops as3722_ldo3_ops = {
.get_current_limit = as3722_ldo3_get_current_limit,
};
-static struct regulator_ops as3722_ldo3_extcntrl_ops = {
+static const struct regulator_ops as3722_ldo3_extcntrl_ops = {
.list_voltage = regulator_list_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_current_limit = as3722_ldo3_get_current_limit,
};
+static const struct regulator_ops as3722_ldo6_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .get_current_limit = as3722_ldo_get_current_limit,
+ .set_current_limit = as3722_ldo_set_current_limit,
+ .get_bypass = regulator_get_bypass_regmap,
+ .set_bypass = regulator_set_bypass_regmap,
+};
+
+static const struct regulator_ops as3722_ldo6_extcntrl_ops = {
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .get_current_limit = as3722_ldo_get_current_limit,
+ .set_current_limit = as3722_ldo_set_current_limit,
+ .get_bypass = regulator_get_bypass_regmap,
+ .set_bypass = regulator_set_bypass_regmap,
+};
+
static const struct regulator_linear_range as3722_ldo_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0),
REGULATOR_LINEAR_RANGE(825000, 0x01, 0x24, 25000),
REGULATOR_LINEAR_RANGE(1725000, 0x40, 0x7F, 25000),
};
-static struct regulator_ops as3722_ldo_ops = {
+static const struct regulator_ops as3722_ldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -450,7 +475,7 @@ static struct regulator_ops as3722_ldo_ops = {
.set_current_limit = as3722_ldo_set_current_limit,
};
-static struct regulator_ops as3722_ldo_extcntrl_ops = {
+static const struct regulator_ops as3722_ldo_extcntrl_ops = {
.map_voltage = regulator_map_voltage_linear_range,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -616,7 +641,7 @@ static const struct regulator_linear_range as3722_sd2345_ranges[] = {
REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7F, 50000),
};
-static struct regulator_ops as3722_sd016_ops = {
+static const struct regulator_ops as3722_sd016_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -630,7 +655,7 @@ static struct regulator_ops as3722_sd016_ops = {
.set_mode = as3722_sd_set_mode,
};
-static struct regulator_ops as3722_sd016_extcntrl_ops = {
+static const struct regulator_ops as3722_sd016_extcntrl_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -641,7 +666,7 @@ static struct regulator_ops as3722_sd016_extcntrl_ops = {
.set_mode = as3722_sd_set_mode,
};
-static struct regulator_ops as3722_sd2345_ops = {
+static const struct regulator_ops as3722_sd2345_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -653,7 +678,7 @@ static struct regulator_ops as3722_sd2345_ops = {
.set_mode = as3722_sd_set_mode,
};
-static struct regulator_ops as3722_sd2345_extcntrl_ops = {
+static const struct regulator_ops as3722_sd2345_extcntrl_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -760,7 +785,7 @@ static int as3722_regulator_probe(struct platform_device *pdev)
struct as3722_regulator_config_data *reg_config;
struct regulator_dev *rdev;
struct regulator_config config = { };
- struct regulator_ops *ops;
+ const struct regulator_ops *ops;
int id;
int ret;
@@ -829,6 +854,24 @@ static int as3722_regulator_probe(struct platform_device *pdev)
}
}
break;
+ case AS3722_REGULATOR_ID_LDO6:
+ if (reg_config->ext_control)
+ ops = &as3722_ldo6_extcntrl_ops;
+ else
+ ops = &as3722_ldo6_ops;
+ as3722_regs->desc[id].enable_time = 500;
+ as3722_regs->desc[id].bypass_reg =
+ AS3722_LDO6_VOLTAGE_REG;
+ as3722_regs->desc[id].bypass_mask =
+ AS3722_LDO_VSEL_MASK;
+ as3722_regs->desc[id].bypass_val_on =
+ AS3722_LDO6_VSEL_BYPASS;
+ as3722_regs->desc[id].bypass_val_off =
+ AS3722_LDO6_VSEL_BYPASS;
+ as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges;
+ as3722_regs->desc[id].n_linear_ranges =
+ ARRAY_SIZE(as3722_ldo_ranges);
+ break;
case AS3722_REGULATOR_ID_SD0:
case AS3722_REGULATOR_ID_SD1:
case AS3722_REGULATOR_ID_SD6:
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index e0b764284773..ec8184d53f13 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -132,6 +132,19 @@ static bool have_full_constraints(void)
return has_full_constraints || of_have_populated_dt();
}
+static bool regulator_ops_is_valid(struct regulator_dev *rdev, int ops)
+{
+ if (!rdev->constraints) {
+ rdev_err(rdev, "no constraints\n");
+ return false;
+ }
+
+ if (rdev->constraints->valid_ops_mask & ops)
+ return true;
+
+ return false;
+}
+
static inline struct regulator_dev *rdev_get_supply(struct regulator_dev *rdev)
{
if (rdev && rdev->supply)
@@ -198,28 +211,13 @@ static struct device_node *of_get_regulator(struct device *dev, const char *supp
return regnode;
}
-static int _regulator_can_change_status(struct regulator_dev *rdev)
-{
- if (!rdev->constraints)
- return 0;
-
- if (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_STATUS)
- return 1;
- else
- return 0;
-}
-
/* Platform voltage constraint check */
static int regulator_check_voltage(struct regulator_dev *rdev,
int *min_uV, int *max_uV)
{
BUG_ON(*min_uV > *max_uV);
- if (!rdev->constraints) {
- rdev_err(rdev, "no constraints\n");
- return -ENODEV;
- }
- if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
rdev_err(rdev, "voltage operation not allowed\n");
return -EPERM;
}
@@ -275,11 +273,7 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
{
BUG_ON(*min_uA > *max_uA);
- if (!rdev->constraints) {
- rdev_err(rdev, "no constraints\n");
- return -ENODEV;
- }
- if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) {
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_CURRENT)) {
rdev_err(rdev, "current operation not allowed\n");
return -EPERM;
}
@@ -312,11 +306,7 @@ static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
return -EINVAL;
}
- if (!rdev->constraints) {
- rdev_err(rdev, "no constraints\n");
- return -ENODEV;
- }
- if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_MODE)) {
rdev_err(rdev, "mode operation not allowed\n");
return -EPERM;
}
@@ -333,20 +323,6 @@ static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
return -EINVAL;
}
-/* dynamic regulator mode switching constraint check */
-static int regulator_check_drms(struct regulator_dev *rdev)
-{
- if (!rdev->constraints) {
- rdev_err(rdev, "no constraints\n");
- return -ENODEV;
- }
- if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
- rdev_dbg(rdev, "drms operation not allowed\n");
- return -EPERM;
- }
- return 0;
-}
-
static ssize_t regulator_uV_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -692,8 +668,7 @@ static int drms_uA_update(struct regulator_dev *rdev)
* first check to see if we can set modes at all, otherwise just
* tell the consumer everything is OK.
*/
- err = regulator_check_drms(rdev);
- if (err < 0)
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS))
return 0;
if (!rdev->desc->ops->get_optimum_mode &&
@@ -808,8 +783,6 @@ static int suspend_set_state(struct regulator_dev *rdev,
/* locks held by caller */
static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
{
- lockdep_assert_held_once(&rdev->mutex);
-
if (!rdev->constraints)
return -EINVAL;
@@ -893,7 +866,7 @@ static void print_constraints(struct regulator_dev *rdev)
rdev_dbg(rdev, "%s\n", buf);
if ((constraints->min_uV != constraints->max_uV) &&
- !(constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE))
+ !regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE))
rdev_warn(rdev,
"Voltage range but no REGULATOR_CHANGE_VOLTAGE\n");
}
@@ -906,7 +879,8 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
/* do we need to apply the constraint voltage */
if (rdev->constraints->apply_uV &&
- rdev->constraints->min_uV == rdev->constraints->max_uV) {
+ rdev->constraints->min_uV && rdev->constraints->max_uV) {
+ int target_min, target_max;
int current_uV = _regulator_get_voltage(rdev);
if (current_uV < 0) {
rdev_err(rdev,
@@ -914,15 +888,34 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
current_uV);
return current_uV;
}
- if (current_uV < rdev->constraints->min_uV ||
- current_uV > rdev->constraints->max_uV) {
+
+ /*
+ * If we're below the minimum voltage move up to the
+ * minimum voltage, if we're above the maximum voltage
+ * then move down to the maximum.
+ */
+ target_min = current_uV;
+ target_max = current_uV;
+
+ if (current_uV < rdev->constraints->min_uV) {
+ target_min = rdev->constraints->min_uV;
+ target_max = rdev->constraints->min_uV;
+ }
+
+ if (current_uV > rdev->constraints->max_uV) {
+ target_min = rdev->constraints->max_uV;
+ target_max = rdev->constraints->max_uV;
+ }
+
+ if (target_min != current_uV || target_max != current_uV) {
+ rdev_info(rdev, "Bringing %duV into %d-%duV\n",
+ current_uV, target_min, target_max);
ret = _regulator_do_set_voltage(
- rdev, rdev->constraints->min_uV,
- rdev->constraints->max_uV);
+ rdev, target_min, target_max);
if (ret < 0) {
rdev_err(rdev,
- "failed to apply %duV constraint(%d)\n",
- rdev->constraints->min_uV, ret);
+ "failed to apply %d-%duV constraint(%d)\n",
+ target_min, target_max, ret);
return ret;
}
}
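
The apply_uV handling above no longer forces the full [min_uV, max_uV] range; it only nudges the current voltage to the nearest edge of the constraint window and leaves in-range regulators untouched. A standalone sketch of that clamping rule:

#include <stdio.h>

static void clamp_target(int current_uV, int min_uV, int max_uV,
			 int *target_min, int *target_max)
{
	*target_min = current_uV;
	*target_max = current_uV;

	if (current_uV < min_uV) {
		*target_min = min_uV;
		*target_max = min_uV;
	}
	if (current_uV > max_uV) {
		*target_min = max_uV;
		*target_max = max_uV;
	}
}

int main(void)
{
	int lo, hi;

	clamp_target(900000, 1000000, 1200000, &lo, &hi);
	printf("%d-%d\n", lo, hi);	/* 1000000-1000000 */
	clamp_target(1100000, 1000000, 1200000, &lo, &hi);
	printf("%d-%d\n", lo, hi);	/* 1100000-1100000: already in range */
	return 0;
}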
@@ -1150,17 +1143,6 @@ static int set_machine_constraints(struct regulator_dev *rdev,
}
}
- if (rdev->constraints->active_discharge && ops->set_active_discharge) {
- bool ad_state = (rdev->constraints->active_discharge ==
- REGULATOR_ACTIVE_DISCHARGE_ENABLE) ? true : false;
-
- ret = ops->set_active_discharge(rdev, ad_state);
- if (ret < 0) {
- rdev_err(rdev, "failed to set active discharge\n");
- return ret;
- }
- }
-
print_constraints(rdev);
return 0;
}
@@ -1272,6 +1254,55 @@ static void unset_regulator_supplies(struct regulator_dev *rdev)
}
}
+#ifdef CONFIG_DEBUG_FS
+static ssize_t constraint_flags_read_file(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const struct regulator *regulator = file->private_data;
+ const struct regulation_constraints *c = regulator->rdev->constraints;
+ char *buf;
+ ssize_t ret;
+
+ if (!c)
+ return 0;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = snprintf(buf, PAGE_SIZE,
+ "always_on: %u\n"
+ "boot_on: %u\n"
+ "apply_uV: %u\n"
+ "ramp_disable: %u\n"
+ "soft_start: %u\n"
+ "pull_down: %u\n"
+ "over_current_protection: %u\n",
+ c->always_on,
+ c->boot_on,
+ c->apply_uV,
+ c->ramp_disable,
+ c->soft_start,
+ c->pull_down,
+ c->over_current_protection);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+ kfree(buf);
+
+ return ret;
+}
+
+#endif
+
+static const struct file_operations constraint_flags_fops = {
+#ifdef CONFIG_DEBUG_FS
+ .open = simple_open,
+ .read = constraint_flags_read_file,
+ .llseek = default_llseek,
+#endif
+};
+
#define REG_STR_SIZE 64
static struct regulator *create_regulator(struct regulator_dev *rdev,
@@ -1327,6 +1358,9 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
&regulator->min_uV);
debugfs_create_u32("max_uV", 0444, regulator->debugfs,
&regulator->max_uV);
+ debugfs_create_file("constraint_flags", 0444,
+ regulator->debugfs, regulator,
+ &constraint_flags_fops);
}
/*
@@ -1334,7 +1368,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
* it is then we don't need to do nearly so much work for
* enable/disable calls.
*/
- if (!_regulator_can_change_status(rdev) &&
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS) &&
_regulator_is_enabled(rdev))
regulator->always_on = true;
@@ -1532,10 +1566,11 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
}
/* Cascade always-on state to supply */
- if (_regulator_is_enabled(rdev) && rdev->supply) {
+ if (_regulator_is_enabled(rdev)) {
ret = regulator_enable(rdev->supply);
if (ret < 0) {
_regulator_put(rdev->supply);
+ rdev->supply = NULL;
return ret;
}
}
@@ -2111,15 +2146,15 @@ static int _regulator_enable(struct regulator_dev *rdev)
lockdep_assert_held_once(&rdev->mutex);
/* check voltage and requested load before enabling */
- if (rdev->constraints &&
- (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS))
+ if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS))
drms_uA_update(rdev);
if (rdev->use_count == 0) {
/* The regulator may on if it's not switchable or left on */
ret = _regulator_is_enabled(rdev);
if (ret == -EINVAL || ret == 0) {
- if (!_regulator_can_change_status(rdev))
+ if (!regulator_ops_is_valid(rdev,
+ REGULATOR_CHANGE_STATUS))
return -EPERM;
ret = _regulator_do_enable(rdev);
@@ -2221,7 +2256,7 @@ static int _regulator_disable(struct regulator_dev *rdev)
(rdev->constraints && !rdev->constraints->always_on)) {
/* we are last user */
- if (_regulator_can_change_status(rdev)) {
+ if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) {
ret = _notifier_call_chain(rdev,
REGULATOR_EVENT_PRE_DISABLE,
NULL);
@@ -2242,10 +2277,7 @@ static int _regulator_disable(struct regulator_dev *rdev)
rdev->use_count = 0;
} else if (rdev->use_count > 1) {
-
- if (rdev->constraints &&
- (rdev->constraints->valid_ops_mask &
- REGULATOR_CHANGE_DRMS))
+ if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS))
drms_uA_update(rdev);
rdev->use_count--;
@@ -2489,8 +2521,7 @@ int regulator_can_change_voltage(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
- if (rdev->constraints &&
- (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+ if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
if (rdev->desc->n_voltages - rdev->desc->linear_min_sel > 1)
return 1;
@@ -2644,7 +2675,7 @@ int regulator_is_supported_voltage(struct regulator *regulator,
int i, voltages, ret;
/* If we can't change voltage check the current voltage */
- if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
ret = regulator_get_voltage(regulator);
if (ret >= 0)
return min_uV <= ret && ret <= max_uV;
@@ -2850,7 +2881,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
* return successfully even though the regulator does not support
* changing the voltage.
*/
- if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
current_uV = _regulator_get_voltage(rdev);
if (min_uV <= current_uV && current_uV <= max_uV) {
regulator->min_uV = min_uV;
@@ -3109,6 +3140,23 @@ EXPORT_SYMBOL_GPL(regulator_sync_voltage);
static int _regulator_get_voltage(struct regulator_dev *rdev)
{
int sel, ret;
+ bool bypassed;
+
+ if (rdev->desc->ops->get_bypass) {
+ ret = rdev->desc->ops->get_bypass(rdev, &bypassed);
+ if (ret < 0)
+ return ret;
+ if (bypassed) {
+ /* if bypassed the regulator must have a supply */
+ if (!rdev->supply) {
+ rdev_err(rdev,
+ "bypassed regulator has no supply!\n");
+ return -EPROBE_DEFER;
+ }
+
+ return _regulator_get_voltage(rdev->supply->rdev);
+ }
+ }
if (rdev->desc->ops->get_voltage_sel) {
sel = rdev->desc->ops->get_voltage_sel(rdev);
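
With the block added above, a regulator that reports itself as bypassed returns its supply's voltage instead of its own selector, deferring the probe if no supply has been resolved yet. A simplified userspace sketch of the recursive lookup:

#include <stdbool.h>
#include <stddef.h>

struct reg {
	int own_uV;
	bool bypassed;
	struct reg *supply;
};

/* A bypassed regulator reports its supply's voltage, not its own. */
static int get_voltage(const struct reg *r)
{
	if (r->bypassed) {
		if (!r->supply)
			return -1;	/* stands in for -EPROBE_DEFER */
		return get_voltage(r->supply);
	}

	return r->own_uV;
}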
@@ -3365,8 +3413,7 @@ int regulator_allow_bypass(struct regulator *regulator, bool enable)
if (!rdev->desc->ops->set_bypass)
return 0;
- if (rdev->constraints &&
- !(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_BYPASS))
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_BYPASS))
return 0;
mutex_lock(&rdev->mutex);
@@ -3840,6 +3887,16 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
&rdev->bypass_count);
}
+static int regulator_register_resolve_supply(struct device *dev, void *data)
+{
+ struct regulator_dev *rdev = dev_to_rdev(dev);
+
+ if (regulator_resolve_supply(rdev))
+ rdev_dbg(rdev, "unable to resolve supply\n");
+
+ return 0;
+}
+
/**
* regulator_register - register regulator
* @regulator_desc: regulator to register
@@ -3911,8 +3968,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
rdev->dev.of_node = of_node_get(config->of_node);
}
- mutex_lock(&regulator_list_mutex);
-
mutex_init(&rdev->mutex);
rdev->reg_data = config->driver_data;
rdev->owner = regulator_desc->owner;
@@ -3937,7 +3992,9 @@ regulator_register(const struct regulator_desc *regulator_desc,
if ((config->ena_gpio || config->ena_gpio_initialized) &&
gpio_is_valid(config->ena_gpio)) {
+ mutex_lock(&regulator_list_mutex);
ret = regulator_ena_gpio_request(rdev, config);
+ mutex_unlock(&regulator_list_mutex);
if (ret != 0) {
rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
config->ena_gpio, ret);
@@ -3950,63 +4007,73 @@ regulator_register(const struct regulator_desc *regulator_desc,
rdev->dev.parent = dev;
dev_set_name(&rdev->dev, "regulator.%lu",
(unsigned long) atomic_inc_return(&regulator_no));
- ret = device_register(&rdev->dev);
- if (ret != 0) {
- put_device(&rdev->dev);
- goto wash;
- }
-
- dev_set_drvdata(&rdev->dev, rdev);
/* set regulator constraints */
if (init_data)
constraints = &init_data->constraints;
- ret = set_machine_constraints(rdev, constraints);
- if (ret < 0)
- goto scrub;
-
if (init_data && init_data->supply_regulator)
rdev->supply_name = init_data->supply_regulator;
else if (regulator_desc->supply_name)
rdev->supply_name = regulator_desc->supply_name;
+ /*
+ * Attempt to resolve the regulator supply, if specified,
+ * but don't return an error if we fail because we will try
+ * to resolve it again later as more regulators are added.
+ */
+ if (regulator_resolve_supply(rdev))
+ rdev_dbg(rdev, "unable to resolve supply\n");
+
+ ret = set_machine_constraints(rdev, constraints);
+ if (ret < 0)
+ goto wash;
+
/* add consumers devices */
if (init_data) {
+ mutex_lock(&regulator_list_mutex);
for (i = 0; i < init_data->num_consumer_supplies; i++) {
ret = set_consumer_device_supply(rdev,
init_data->consumer_supplies[i].dev_name,
init_data->consumer_supplies[i].supply);
if (ret < 0) {
+ mutex_unlock(&regulator_list_mutex);
dev_err(dev, "Failed to set supply %s\n",
init_data->consumer_supplies[i].supply);
goto unset_supplies;
}
}
+ mutex_unlock(&regulator_list_mutex);
+ }
+
+ ret = device_register(&rdev->dev);
+ if (ret != 0) {
+ put_device(&rdev->dev);
+ goto unset_supplies;
}
+ dev_set_drvdata(&rdev->dev, rdev);
rdev_init_debugfs(rdev);
-out:
- mutex_unlock(&regulator_list_mutex);
+
+ /* try to resolve regulators supply since a new one was registered */
+ class_for_each_device(&regulator_class, NULL, NULL,
+ regulator_register_resolve_supply);
kfree(config);
return rdev;
unset_supplies:
+ mutex_lock(&regulator_list_mutex);
unset_regulator_supplies(rdev);
-
-scrub:
- regulator_ena_gpio_free(rdev);
- device_unregister(&rdev->dev);
- /* device core frees rdev */
- rdev = ERR_PTR(ret);
- goto out;
-
+ mutex_unlock(&regulator_list_mutex);
wash:
+ kfree(rdev->constraints);
+ mutex_lock(&regulator_list_mutex);
regulator_ena_gpio_free(rdev);
+ mutex_unlock(&regulator_list_mutex);
clean:
kfree(rdev);
- rdev = ERR_PTR(ret);
- goto out;
+ kfree(config);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(regulator_register);
@@ -4032,8 +4099,8 @@ void regulator_unregister(struct regulator_dev *rdev)
WARN_ON(rdev->open_count);
unset_regulator_supplies(rdev);
list_del(&rdev->list);
- mutex_unlock(&regulator_list_mutex);
regulator_ena_gpio_free(rdev);
+ mutex_unlock(&regulator_list_mutex);
device_unregister(&rdev->dev);
}
EXPORT_SYMBOL_GPL(regulator_unregister);
@@ -4386,7 +4453,7 @@ static int __init regulator_late_cleanup(struct device *dev, void *data)
if (c && c->always_on)
return 0;
- if (c && !(c->valid_ops_mask & REGULATOR_CHANGE_STATUS))
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS))
return 0;
mutex_lock(&rdev->mutex);
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 2cb5cc311610..d7da81a875cf 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -65,6 +65,13 @@ enum {
FAN53555_CHIP_ID_03,
FAN53555_CHIP_ID_04,
FAN53555_CHIP_ID_05,
+ FAN53555_CHIP_ID_08 = 8,
+};
+
+/* IC mask revision */
+enum {
+ FAN53555_CHIP_REV_00 = 0x3,
+ FAN53555_CHIP_REV_13 = 0xf,
};
enum {
@@ -217,9 +224,26 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
/* Init voltage range and step */
switch (di->chip_id) {
case FAN53555_CHIP_ID_00:
+ switch (di->chip_rev) {
+ case FAN53555_CHIP_REV_00:
+ di->vsel_min = 600000;
+ di->vsel_step = 10000;
+ break;
+ case FAN53555_CHIP_REV_13:
+ di->vsel_min = 800000;
+ di->vsel_step = 10000;
+ break;
+ default:
+ dev_err(di->dev,
+ "Chip ID %d with rev %d not supported!\n",
+ di->chip_id, di->chip_rev);
+ return -EINVAL;
+ }
+ break;
case FAN53555_CHIP_ID_01:
case FAN53555_CHIP_ID_03:
case FAN53555_CHIP_ID_05:
+ case FAN53555_CHIP_ID_08:
di->vsel_min = 600000;
di->vsel_step = 10000;
break;
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index b1e32e7482e9..bcf38fd5106a 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -460,7 +460,7 @@ int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable)
if (ret != 0)
return ret;
- *enable = val & rdev->desc->bypass_mask;
+ *enable = (val & rdev->desc->bypass_mask) == rdev->desc->bypass_val_on;
return 0;
}
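
The fix compares the masked field against bypass_val_on instead of treating any non-zero bits as "bypassed", which matters once the bypass field is wider than one bit or its on-value is not equal to the mask. A small demonstration with illustrative register values:

#include <stdbool.h>
#include <stdio.h>

#define BYPASS_MASK	0x60	/* two-bit field (illustrative) */
#define BYPASS_VAL_ON	0x40	/* only this pattern means "bypassed" */

static bool old_check(unsigned int val)
{
	return val & BYPASS_MASK;
}

static bool new_check(unsigned int val)
{
	return (val & BYPASS_MASK) == BYPASS_VAL_ON;
}

int main(void)
{
	unsigned int reg = 0x20;	/* field set to a non-bypass mode */

	printf("old: %d, new: %d\n", old_check(reg), new_check(reg));
	/* old: 1 (false positive), new: 0 */
	return 0;
}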
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 15c25c622edf..204b5c5270e0 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -365,8 +365,8 @@ static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val)
mutex_lock(&lp3971->io_lock);
ret = lp3971_i2c_read(lp3971->i2c, reg, 1, &tmp);
- tmp = (tmp & ~mask) | val;
if (ret == 0) {
+ tmp = (tmp & ~mask) | val;
ret = lp3971_i2c_write(lp3971->i2c, reg, 1, &tmp);
dev_dbg(lp3971->dev, "reg write 0x%02x -> 0x%02x\n", (int)reg,
(unsigned)val&0xff);
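
The lp3971 change (and the identical lp3972 change below) folds the new bits in and writes back only when the I2C read succeeded, so a failed read can no longer push an uninitialized value to the chip. A generic sketch of the corrected read-modify-write; read_reg() and write_reg() are placeholders for the bus accessors:

static int set_bits(u8 reg, u16 mask, u16 val)
{
	u16 tmp;
	int ret;

	ret = read_reg(reg, &tmp);		/* placeholder bus read */
	if (ret == 0) {
		tmp = (tmp & ~mask) | val;
		ret = write_reg(reg, tmp);	/* placeholder bus write */
	}

	return ret;
}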
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index 3a7e96e2c7b3..ff0c275f902e 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -211,8 +211,8 @@ static int lp3972_set_bits(struct lp3972 *lp3972, u8 reg, u16 mask, u16 val)
mutex_lock(&lp3972->io_lock);
ret = lp3972_i2c_read(lp3972->i2c, reg, 1, &tmp);
- tmp = (tmp & ~mask) | val;
if (ret == 0) {
+ tmp = (tmp & ~mask) | val;
ret = lp3972_i2c_write(lp3972->i2c, reg, 1, &tmp);
dev_dbg(lp3972->dev, "reg write 0x%02x -> 0x%02x\n", (int)reg,
(unsigned)val & 0xff);
diff --git a/drivers/regulator/lp873x-regulator.c b/drivers/regulator/lp873x-regulator.c
new file mode 100644
index 000000000000..b4ffd113ba21
--- /dev/null
+++ b/drivers/regulator/lp873x-regulator.c
@@ -0,0 +1,241 @@
+/*
+ * Regulator driver for LP873X PMIC
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/lp873x.h>
+
+#define LP873X_REGULATOR(_name, _id, _of, _ops, _n, _vr, _vm, _er, _em, \
+ _delay, _lr, _nlr, _cr) \
+ [_id] = { \
+ .desc = { \
+ .name = _name, \
+ .id = _id, \
+ .of_match = of_match_ptr(_of), \
+ .regulators_node = of_match_ptr("regulators"),\
+ .ops = &_ops, \
+ .n_voltages = _n, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = _vr, \
+ .vsel_mask = _vm, \
+ .enable_reg = _er, \
+ .enable_mask = _em, \
+ .ramp_delay = _delay, \
+ .linear_ranges = _lr, \
+ .n_linear_ranges = _nlr, \
+ }, \
+ .ctrl2_reg = _cr, \
+ }
+
+struct lp873x_regulator {
+ struct regulator_desc desc;
+ unsigned int ctrl2_reg;
+};
+
+static const struct lp873x_regulator regulators[];
+
+static const struct regulator_linear_range buck0_buck1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, 0x0, 0x13, 0),
+ REGULATOR_LINEAR_RANGE(700000, 0x14, 0x17, 10000),
+ REGULATOR_LINEAR_RANGE(735000, 0x18, 0x9d, 5000),
+ REGULATOR_LINEAR_RANGE(1420000, 0x9e, 0xff, 20000),
+};
+
+static const struct regulator_linear_range ldo0_ldo1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0x0, 0x19, 100000),
+};
+
+static unsigned int lp873x_buck_ramp_delay[] = {
+ 30000, 15000, 10000, 7500, 3800, 1900, 940, 470
+};
+
+/* LP873X BUCK current limit */
+static const unsigned int lp873x_buck_uA[] = {
+ 1500000, 2000000, 2500000, 3000000, 3500000, 4000000,
+};
+
+static int lp873x_buck_set_ramp_delay(struct regulator_dev *rdev,
+ int ramp_delay)
+{
+ int id = rdev_get_id(rdev);
+ struct lp873x *lp873 = rdev_get_drvdata(rdev);
+ unsigned int reg;
+ int ret;
+
+ if (ramp_delay <= 470)
+ reg = 7;
+ else if (ramp_delay <= 940)
+ reg = 6;
+ else if (ramp_delay <= 1900)
+ reg = 5;
+ else if (ramp_delay <= 3800)
+ reg = 4;
+ else if (ramp_delay <= 7500)
+ reg = 3;
+ else if (ramp_delay <= 10000)
+ reg = 2;
+ else if (ramp_delay <= 15000)
+ reg = 1;
+ else
+ reg = 0;
+
+ ret = regmap_update_bits(lp873->regmap, regulators[id].ctrl2_reg,
+ LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE,
+ reg << __ffs(LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE));
+ if (ret) {
+ dev_err(lp873->dev, "SLEW RATE write failed: %d\n", ret);
+ return ret;
+ }
+
+ rdev->constraints->ramp_delay = lp873x_buck_ramp_delay[reg];
+
+ return 0;
+}
+
+static int lp873x_buck_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ int id = rdev_get_id(rdev);
+ struct lp873x *lp873 = rdev_get_drvdata(rdev);
+ int i;
+
+ for (i = ARRAY_SIZE(lp873x_buck_uA) - 1; i >= 0; i--) {
+ if (lp873x_buck_uA[i] >= min_uA &&
+ lp873x_buck_uA[i] <= max_uA)
+ return regmap_update_bits(lp873->regmap,
+ regulators[id].ctrl2_reg,
+ LP873X_BUCK0_CTRL_2_BUCK0_ILIM,
+ i << __ffs(LP873X_BUCK0_CTRL_2_BUCK0_ILIM));
+ }
+
+ return -EINVAL;
+}
+
+static int lp873x_buck_get_current_limit(struct regulator_dev *rdev)
+{
+ int id = rdev_get_id(rdev);
+ struct lp873x *lp873 = rdev_get_drvdata(rdev);
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(lp873->regmap, regulators[id].ctrl2_reg, &val);
+ if (ret)
+ return ret;
+
+ val = (val & LP873X_BUCK0_CTRL_2_BUCK0_ILIM) >>
+ __ffs(LP873X_BUCK0_CTRL_2_BUCK0_ILIM);
+
+ return (val < ARRAY_SIZE(lp873x_buck_uA)) ?
+ lp873x_buck_uA[val] : -EINVAL;
+}
+
+/* Operations permitted on BUCK0, BUCK1 */
+static struct regulator_ops lp873x_buck01_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = lp873x_buck_set_ramp_delay,
+ .set_current_limit = lp873x_buck_set_current_limit,
+ .get_current_limit = lp873x_buck_get_current_limit,
+};
+
+/* Operations permitted on LDO0 and LDO1 */
+static struct regulator_ops lp873x_ldo01_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+static const struct lp873x_regulator regulators[] = {
+ LP873X_REGULATOR("BUCK0", LP873X_BUCK_0, "buck0", lp873x_buck01_ops,
+ 256, LP873X_REG_BUCK0_VOUT,
+ LP873X_BUCK0_VOUT_BUCK0_VSET, LP873X_REG_BUCK0_CTRL_1,
+ LP873X_BUCK0_CTRL_1_BUCK0_EN, 10000,
+ buck0_buck1_ranges, 4, LP873X_REG_BUCK0_CTRL_2),
+ LP873X_REGULATOR("BUCK1", LP873X_BUCK_1, "buck1", lp873x_buck01_ops,
+ 256, LP873X_REG_BUCK1_VOUT,
+ LP873X_BUCK1_VOUT_BUCK1_VSET, LP873X_REG_BUCK1_CTRL_1,
+ LP873X_BUCK1_CTRL_1_BUCK1_EN, 10000,
+ buck0_buck1_ranges, 4, LP873X_REG_BUCK1_CTRL_2),
+ LP873X_REGULATOR("LDO0", LP873X_LDO_0, "ldo0", lp873x_ldo01_ops, 26,
+ LP873X_REG_LDO0_VOUT, LP873X_LDO0_VOUT_LDO0_VSET,
+ LP873X_REG_LDO0_CTRL,
+ LP873X_LDO0_CTRL_LDO0_EN, 0, ldo0_ldo1_ranges, 1,
+ 0xFF),
+ LP873X_REGULATOR("LDO1", LP873X_LDO_1, "ldo1", lp873x_ldo01_ops, 26,
+ LP873X_REG_LDO1_VOUT, LP873X_LDO1_VOUT_LDO1_VSET,
+ LP873X_REG_LDO1_CTRL,
+ LP873X_LDO1_CTRL_LDO1_EN, 0, ldo0_ldo1_ranges, 1,
+ 0xFF),
+};
+
+static int lp873x_regulator_probe(struct platform_device *pdev)
+{
+ struct lp873x *lp873 = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ int i;
+
+ platform_set_drvdata(pdev, lp873);
+
+ config.dev = &pdev->dev;
+ config.dev->of_node = lp873->dev->of_node;
+ config.driver_data = lp873;
+ config.regmap = lp873->regmap;
+
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i].desc,
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(lp873->dev, "failed to register %s regulator\n",
+ pdev->name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id lp873x_regulator_id_table[] = {
+ { "lp873x-regulator", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, lp873x_regulator_id_table);
+
+static struct platform_driver lp873x_regulator_driver = {
+ .driver = {
+ .name = "lp873x-pmic",
+ },
+ .probe = lp873x_regulator_probe,
+ .id_table = lp873x_regulator_id_table,
+};
+module_platform_driver(lp873x_regulator_driver);
+
+MODULE_AUTHOR("J Keerthy <j-keerthy@ti.com>");
+MODULE_DESCRIPTION("LP873X voltage regulator driver");
+MODULE_ALIAS("platform:lp873x-pmic");
+MODULE_LICENSE("GPL v2");
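As a side note on the lp873x driver above: lp873x_buck_set_ramp_delay() buckets the requested ramp delay (in uV/us) into one of eight register codes and then records the resulting rate in rdev->constraints->ramp_delay. A small standalone sketch of that mapping — the thresholds and table are copied from the driver, everything else is illustrative:

#include <stdio.h>

static const unsigned int lp873x_ramp_uv_per_us[] = {
        30000, 15000, 10000, 7500, 3800, 1900, 940, 470
};

/* Map a requested ramp delay to the register code the driver would program. */
static unsigned int ramp_to_reg(int ramp_delay)
{
        if (ramp_delay <= 470)   return 7;
        if (ramp_delay <= 940)   return 6;
        if (ramp_delay <= 1900)  return 5;
        if (ramp_delay <= 3800)  return 4;
        if (ramp_delay <= 7500)  return 3;
        if (ramp_delay <= 10000) return 2;
        if (ramp_delay <= 15000) return 1;
        return 0;
}

int main(void)
{
        int req = 5000;
        unsigned int reg = ramp_to_reg(req);

        /* 5000 uV/us falls in the <= 7500 bucket -> code 3 -> 7500 uV/us recorded */
        printf("request %d uV/us -> reg %u -> effective %u uV/us\n",
               req, reg, lp873x_ramp_uv_per_us[reg]);
        return 0;
}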
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577-regulator.c
index b2daa6641417..b2daa6641417 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577-regulator.c
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
index 73a3356a5c19..321e804aeab0 100644
--- a/drivers/regulator/max77620-regulator.c
+++ b/drivers/regulator/max77620-regulator.c
@@ -81,6 +81,7 @@ struct max77620_regulator_pdata {
int suspend_fps_pd_slot;
int suspend_fps_pu_slot;
int current_mode;
+ int ramp_rate_setting;
};
struct max77620_regulator {
@@ -307,6 +308,43 @@ static int max77620_read_slew_rate(struct max77620_regulator *pmic, int id)
return 0;
}
+static int max77620_set_slew_rate(struct max77620_regulator *pmic, int id,
+ int slew_rate)
+{
+ struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+ unsigned int val;
+ int ret;
+ u8 mask;
+
+ if (rinfo->type == MAX77620_REGULATOR_TYPE_SD) {
+ if (slew_rate <= 13750)
+ val = 0;
+ else if (slew_rate <= 27500)
+ val = 1;
+ else if (slew_rate <= 55000)
+ val = 2;
+ else
+ val = 3;
+ val <<= MAX77620_SD_SR_SHIFT;
+ mask = MAX77620_SD_SR_MASK;
+ } else {
+ if (slew_rate <= 5000)
+ val = 1;
+ else
+ val = 0;
+ mask = MAX77620_LDO_SLEW_RATE_MASK;
+ }
+
+ ret = regmap_update_bits(pmic->rmap, rinfo->cfg_addr, mask, val);
+ if (ret < 0) {
+ dev_err(pmic->dev, "Regulator %d slew rate set failed: %d\n",
+ id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int max77620_init_pmic(struct max77620_regulator *pmic, int id)
{
struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id];
@@ -351,6 +389,13 @@ static int max77620_init_pmic(struct max77620_regulator *pmic, int id)
if (ret < 0)
return ret;
+ if (rpdata->ramp_rate_setting) {
+ ret = max77620_set_slew_rate(pmic, id,
+ rpdata->ramp_rate_setting);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -502,35 +547,16 @@ static int max77620_regulator_set_ramp_delay(struct regulator_dev *rdev,
{
struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
int id = rdev_get_id(rdev);
- struct max77620_regulator_info *rinfo = pmic->rinfo[id];
- int ret, val;
- u8 mask;
-
- if (rinfo->type == MAX77620_REGULATOR_TYPE_SD) {
- if (ramp_delay <= 13750)
- val = 0;
- else if (ramp_delay <= 27500)
- val = 1;
- else if (ramp_delay <= 55000)
- val = 2;
- else
- val = 3;
- val <<= MAX77620_SD_SR_SHIFT;
- mask = MAX77620_SD_SR_MASK;
- } else {
- if (ramp_delay <= 5000)
- val = 1;
- else
- val = 0;
- mask = MAX77620_LDO_SLEW_RATE_MASK;
- }
+ struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id];
- ret = regmap_update_bits(pmic->rmap, rinfo->cfg_addr, mask, val);
- if (ret < 0)
- dev_err(pmic->dev, "Reg 0x%02x update failed: %d\n",
- rinfo->cfg_addr, ret);
+ /* A device-specific ramp rate setting means the platform's actual
+ * ramp rate differs from the advertised value. In that case, do not
+ * configure anything and just return success.
+ */
+ if (rpdata->ramp_rate_setting)
+ return 0;
- return ret;
+ return max77620_set_slew_rate(pmic, id, ramp_delay);
}
static int max77620_of_parse_cb(struct device_node *np,
@@ -563,6 +589,9 @@ static int max77620_of_parse_cb(struct device_node *np,
np, "maxim,suspend-fps-power-down-slot", &pval);
rpdata->suspend_fps_pd_slot = (!ret) ? pval : -1;
+ ret = of_property_read_u32(np, "maxim,ramp-rate-setting", &pval);
+ rpdata->ramp_rate_setting = (!ret) ? pval : 0;
+
return max77620_init_pmic(pmic, desc->id);
}
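For the SD rails, max77620_set_slew_rate() above buckets the requested slew rate into the two-bit SR field, while the LDO path is a simple <= 5000 uV/us check. A standalone sketch of the SD-rail bucketing — thresholds copied from the diff, the rest is illustrative:

#include <stdio.h>

/* Bucket a requested SD-rail slew rate (uV/us) the same way the driver does. */
static unsigned int sd_slew_to_field(int slew_rate)
{
        if (slew_rate <= 13750) return 0;
        if (slew_rate <= 27500) return 1;
        if (slew_rate <= 55000) return 2;
        return 3;
}

int main(void)
{
        int requests[] = { 10000, 27500, 60000 };

        for (unsigned int i = 0; i < sizeof(requests) / sizeof(requests[0]); i++)
                printf("%d uV/us -> SR field %u\n",
                       requests[i], sd_slew_to_field(requests[i]));
        return 0;
}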
diff --git a/drivers/regulator/max77686-regulator.c b/drivers/regulator/max77686-regulator.c
index 17ccf365a9c0..ac4fa581e0a5 100644
--- a/drivers/regulator/max77686-regulator.c
+++ b/drivers/regulator/max77686-regulator.c
@@ -41,6 +41,8 @@
#define MAX77686_LDO_LOW_UVSTEP 25000
#define MAX77686_BUCK_MINUV 750000
#define MAX77686_BUCK_UVSTEP 50000
+#define MAX77686_BUCK_ENABLE_TIME 40 /* us */
+#define MAX77686_DVS_ENABLE_TIME 22 /* us */
#define MAX77686_RAMP_DELAY 100000 /* uV/us */
#define MAX77686_DVS_RAMP_DELAY 27500 /* uV/us */
#define MAX77686_DVS_MINUV 600000
@@ -422,6 +424,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
.min_uV = MAX77686_BUCK_MINUV, \
.uV_step = MAX77686_BUCK_UVSTEP, \
.ramp_delay = MAX77686_RAMP_DELAY, \
+ .enable_time = MAX77686_BUCK_ENABLE_TIME, \
.n_voltages = MAX77686_VSEL_MASK + 1, \
.vsel_reg = MAX77686_REG_BUCK5OUT + (num - 5) * 2, \
.vsel_mask = MAX77686_VSEL_MASK, \
@@ -439,6 +442,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
.min_uV = MAX77686_BUCK_MINUV, \
.uV_step = MAX77686_BUCK_UVSTEP, \
.ramp_delay = MAX77686_RAMP_DELAY, \
+ .enable_time = MAX77686_BUCK_ENABLE_TIME, \
.n_voltages = MAX77686_VSEL_MASK + 1, \
.vsel_reg = MAX77686_REG_BUCK1OUT, \
.vsel_mask = MAX77686_VSEL_MASK, \
@@ -456,6 +460,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
.min_uV = MAX77686_DVS_MINUV, \
.uV_step = MAX77686_DVS_UVSTEP, \
.ramp_delay = MAX77686_DVS_RAMP_DELAY, \
+ .enable_time = MAX77686_DVS_ENABLE_TIME, \
.n_voltages = MAX77686_DVS_VSEL_MASK + 1, \
.vsel_reg = MAX77686_REG_BUCK2DVS1 + (num - 2) * 10, \
.vsel_mask = MAX77686_DVS_VSEL_MASK, \
@@ -553,17 +558,7 @@ static struct platform_driver max77686_pmic_driver = {
.id_table = max77686_pmic_id,
};
-static int __init max77686_pmic_init(void)
-{
- return platform_driver_register(&max77686_pmic_driver);
-}
-subsys_initcall(max77686_pmic_init);
-
-static void __exit max77686_pmic_cleanup(void)
-{
- platform_driver_unregister(&max77686_pmic_driver);
-}
-module_exit(max77686_pmic_cleanup);
+module_platform_driver(max77686_pmic_driver);
MODULE_DESCRIPTION("MAXIM 77686 Regulator Driver");
MODULE_AUTHOR("Chiwoong Byun <woong.byun@samsung.com>");
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693-regulator.c
index de730fd3f8a5..de730fd3f8a5 100644
--- a/drivers/regulator/max77693.c
+++ b/drivers/regulator/max77693-regulator.c
diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
index c07ee13bd470..1d3539324d9a 100644
--- a/drivers/regulator/max77802-regulator.c
+++ b/drivers/regulator/max77802-regulator.c
@@ -5,7 +5,7 @@
* Simon Glass <sjg@chromium.org>
*
* Copyright (C) 2012 Samsung Electronics
- * Chiwoong Byun <woong.byun@smasung.com>
+ * Chiwoong Byun <woong.byun@samsung.com>
* Jonghwa Lee <jonghwa3.lee@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 5b75b7c2e3ea..08d2f13eca00 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -38,6 +38,9 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
+#include <linux/thermal.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
/* Register definitions */
#define MAX8973_VOUT 0x0
@@ -74,6 +77,7 @@
#define MAX8973_WDTMR_ENABLE BIT(6)
#define MAX8973_DISCH_ENBABLE BIT(5)
#define MAX8973_FT_ENABLE BIT(4)
+#define MAX77621_T_JUNCTION_120 BIT(7)
#define MAX8973_CKKADV_TRIP_MASK 0xC
#define MAX8973_CKKADV_TRIP_DISABLE 0xC
@@ -93,6 +97,12 @@
#define MAX8973_VOLATGE_STEP 6250
#define MAX8973_BUCK_N_VOLTAGE 0x80
+#define MAX77621_CHIPID_TJINT_S BIT(0)
+
+#define MAX77621_NORMAL_OPERATING_TEMP 100000
+#define MAX77621_TJINT_WARNING_TEMP_120 120000
+#define MAX77621_TJINT_WARNING_TEMP_140 140000
+
enum device_id {
MAX8973,
MAX77621
@@ -112,6 +122,9 @@ struct max8973_chip {
int curr_gpio_val;
struct regulator_ops ops;
enum device_id id;
+ int junction_temp_warning;
+ int irq;
+ struct thermal_zone_device *tz_device;
};
/*
@@ -391,6 +404,10 @@ static int max8973_init_dcdc(struct max8973_chip *max,
if (pdata->control_flags & MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE)
control1 |= MAX8973_FREQSHIFT_9PER;
+ if ((pdata->junction_temp_warning == MAX77621_TJINT_WARNING_TEMP_120) &&
+ (max->id == MAX77621))
+ control2 |= MAX77621_T_JUNCTION_120;
+
if (!(pdata->control_flags & MAX8973_CONTROL_PULL_DOWN_ENABLE))
control2 |= MAX8973_DISCH_ENBABLE;
@@ -457,6 +474,79 @@ static int max8973_init_dcdc(struct max8973_chip *max,
return ret;
}
+static int max8973_thermal_read_temp(void *data, int *temp)
+{
+ struct max8973_chip *mchip = data;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(mchip->regmap, MAX8973_CHIPID1, &val);
+ if (ret < 0) {
+ dev_err(mchip->dev, "Failed to read register CHIPID1, %d", ret);
+ return ret;
+ }
+
+ /* Report +1 degC to trigger the cooling device */
+ if (val & MAX77621_CHIPID_TJINT_S)
+ *temp = mchip->junction_temp_warning + 1000;
+ else
+ *temp = MAX77621_NORMAL_OPERATING_TEMP;
+
+ return 0;
+}
+
+static irqreturn_t max8973_thermal_irq(int irq, void *data)
+{
+ struct max8973_chip *mchip = data;
+
+ thermal_zone_device_update(mchip->tz_device);
+
+ return IRQ_HANDLED;
+}
+
+static const struct thermal_zone_of_device_ops max77621_tz_ops = {
+ .get_temp = max8973_thermal_read_temp,
+};
+
+static int max8973_thermal_init(struct max8973_chip *mchip)
+{
+ struct thermal_zone_device *tzd;
+ struct irq_data *irq_data;
+ unsigned long irq_flags = 0;
+ int ret;
+
+ if (mchip->id != MAX77621)
+ return 0;
+
+ tzd = devm_thermal_zone_of_sensor_register(mchip->dev, 0, mchip,
+ &max77621_tz_ops);
+ if (IS_ERR(tzd)) {
+ ret = PTR_ERR(tzd);
+ dev_err(mchip->dev, "Failed to register thermal sensor: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (mchip->irq <= 0)
+ return 0;
+
+ irq_data = irq_get_irq_data(mchip->irq);
+ if (irq_data)
+ irq_flags = irqd_get_trigger_type(irq_data);
+
+ ret = devm_request_threaded_irq(mchip->dev, mchip->irq, NULL,
+ max8973_thermal_irq,
+ IRQF_ONESHOT | IRQF_SHARED | irq_flags,
+ dev_name(mchip->dev), mchip);
+ if (ret < 0) {
+ dev_err(mchip->dev, "Failed to request irq %d, %d\n",
+ mchip->irq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct regmap_config max8973_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -521,6 +611,11 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
pdata->control_flags |= MAX8973_CONTROL_CLKADV_TRIP_DISABLED;
}
+ pdata->junction_temp_warning = MAX77621_TJINT_WARNING_TEMP_140;
+ ret = of_property_read_u32(np, "junction-warn-millicelsius", &pval);
+ if (!ret && (pval <= MAX77621_TJINT_WARNING_TEMP_120))
+ pdata->junction_temp_warning = MAX77621_TJINT_WARNING_TEMP_120;
+
return pdata;
}
@@ -608,6 +703,7 @@ static int max8973_probe(struct i2c_client *client,
max->enable_external_control = pdata->enable_ext_control;
max->curr_gpio_val = pdata->dvs_def_state;
max->curr_vout_reg = MAX8973_VOUT + pdata->dvs_def_state;
+ max->junction_temp_warning = pdata->junction_temp_warning;
if (gpio_is_valid(max->enable_gpio))
max->enable_external_control = true;
@@ -718,6 +814,7 @@ static int max8973_probe(struct i2c_client *client,
return ret;
}
+ max8973_thermal_init(max);
return 0;
}
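The MAX77621 exposes no temperature register, only the TJINT_S status bit, so max8973_thermal_read_temp() above synthesizes one of two values in millicelsius: the normal operating temperature, or the configured junction warning threshold plus 1 degC so the thermal core starts cooling. A small illustration with the constants copied from the diff:

#include <stdio.h>

/* Report a synthetic temperature from the warning-status bit, as the driver does. */
static int reported_temp(int tjint_set, int junction_temp_warning)
{
        return tjint_set ? junction_temp_warning + 1000   /* threshold + 1 degC */
                         : 100000;                        /* normal operating temp */
}

int main(void)
{
        printf("idle: %d mC, tripped@120C: %d mC, tripped@140C: %d mC\n",
               reported_temp(0, 140000),
               reported_temp(1, 120000),
               reported_temp(1, 140000));
        return 0;
}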
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997-regulator.c
index ea0196d4496b..efabc0ea0e96 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997-regulator.c
@@ -2,7 +2,7 @@
* max8997.c - Regulator driver for the Maxim 8997/8966
*
* Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@smasung.com>
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 6b0aa80b22fd..cd828dbf9d52 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -45,9 +45,9 @@ static void of_get_regulation_constraints(struct device_node *np,
/* Voltage change possible? */
if (constraints->min_uV != constraints->max_uV)
constraints->valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE;
- /* Only one voltage? Then make sure it's set. */
- if (constraints->min_uV && constraints->max_uV &&
- constraints->min_uV == constraints->max_uV)
+
+ /* Do we have a voltage range? If so, try to apply it. */
+ if (constraints->min_uV && constraints->max_uV)
constraints->apply_uV = true;
if (!of_property_read_u32(np, "regulator-microvolt-offset", &pval))
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 6efc7ee8aea3..f11d41dad9c1 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -944,6 +944,8 @@ static int palmas_ldo_registration(struct palmas_pmic *pmic,
if (id == PALMAS_REG_LDO9) {
desc->ops = &palmas_ops_ldo9;
desc->bypass_reg = desc->enable_reg;
+ desc->bypass_val_on =
+ PALMAS_LDO9_CTRL_LDO_BYPASS_EN;
desc->bypass_mask =
PALMAS_LDO9_CTRL_LDO_BYPASS_EN;
}
@@ -1055,6 +1057,8 @@ static int tps65917_ldo_registration(struct palmas_pmic *pmic,
id == TPS65917_REG_LDO2) {
desc->ops = &tps65917_ops_ldo_1_2;
desc->bypass_reg = desc->enable_reg;
+ desc->bypass_val_on =
+ TPS65917_LDO1_CTRL_BYPASS_EN;
desc->bypass_mask =
TPS65917_LDO1_CTRL_BYPASS_EN;
}
@@ -1206,6 +1210,7 @@ static int palmas_smps_registration(struct palmas_pmic *pmic,
desc->enable_mask = SMPS10_BOOST_EN;
desc->bypass_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
PALMAS_SMPS10_CTRL);
+ desc->bypass_val_on = SMPS10_BYPASS_EN;
desc->bypass_mask = SMPS10_BYPASS_EN;
desc->min_uV = 3750000;
desc->uV_step = 1250000;
@@ -1462,10 +1467,10 @@ static struct palmas_pmic_driver_data tps65917_ddata = {
.ldo_register = tps65917_ldo_registration,
};
-static void palmas_dt_to_pdata(struct device *dev,
- struct device_node *node,
- struct palmas_pmic_platform_data *pdata,
- struct palmas_pmic_driver_data *ddata)
+static int palmas_dt_to_pdata(struct device *dev,
+ struct device_node *node,
+ struct palmas_pmic_platform_data *pdata,
+ struct palmas_pmic_driver_data *ddata)
{
struct device_node *regulators;
u32 prop;
@@ -1474,7 +1479,7 @@ static void palmas_dt_to_pdata(struct device *dev,
regulators = of_get_child_by_name(node, "regulators");
if (!regulators) {
dev_info(dev, "regulator node not found\n");
- return;
+ return 0;
}
ret = of_regulator_match(dev, regulators, ddata->palmas_matches,
@@ -1482,25 +1487,29 @@ static void palmas_dt_to_pdata(struct device *dev,
of_node_put(regulators);
if (ret < 0) {
dev_err(dev, "Error parsing regulator init data: %d\n", ret);
- return;
+ return 0;
}
for (idx = 0; idx < ddata->max_reg; idx++) {
- if (!ddata->palmas_matches[idx].init_data ||
- !ddata->palmas_matches[idx].of_node)
- continue;
+ struct of_regulator_match *match;
+ struct palmas_reg_init *rinit;
+ struct device_node *np;
- pdata->reg_data[idx] = ddata->palmas_matches[idx].init_data;
+ match = &ddata->palmas_matches[idx];
+ np = match->of_node;
- pdata->reg_init[idx] = devm_kzalloc(dev,
- sizeof(struct palmas_reg_init), GFP_KERNEL);
+ if (!match->init_data || !np)
+ continue;
+
+ rinit = devm_kzalloc(dev, sizeof(*rinit), GFP_KERNEL);
+ if (!rinit)
+ return -ENOMEM;
- pdata->reg_init[idx]->warm_reset =
- of_property_read_bool(ddata->palmas_matches[idx].of_node,
- "ti,warm-reset");
+ pdata->reg_data[idx] = match->init_data;
+ pdata->reg_init[idx] = rinit;
- ret = of_property_read_u32(ddata->palmas_matches[idx].of_node,
- "ti,roof-floor", &prop);
+ rinit->warm_reset = of_property_read_bool(np, "ti,warm-reset");
+ ret = of_property_read_u32(np, "ti,roof-floor", &prop);
/* EINVAL: Property not found */
if (ret != -EINVAL) {
int econtrol;
@@ -1522,31 +1531,29 @@ static void palmas_dt_to_pdata(struct device *dev,
WARN_ON(1);
dev_warn(dev,
"%s: Invalid roof-floor option: %u\n",
- palmas_matches[idx].name, prop);
+ match->name, prop);
break;
}
}
- pdata->reg_init[idx]->roof_floor = econtrol;
+ rinit->roof_floor = econtrol;
}
- ret = of_property_read_u32(ddata->palmas_matches[idx].of_node,
- "ti,mode-sleep", &prop);
+ ret = of_property_read_u32(np, "ti,mode-sleep", &prop);
if (!ret)
- pdata->reg_init[idx]->mode_sleep = prop;
+ rinit->mode_sleep = prop;
- ret = of_property_read_bool(ddata->palmas_matches[idx].of_node,
- "ti,smps-range");
+ ret = of_property_read_bool(np, "ti,smps-range");
if (ret)
- pdata->reg_init[idx]->vsel =
- PALMAS_SMPS12_VOLTAGE_RANGE;
+ rinit->vsel = PALMAS_SMPS12_VOLTAGE_RANGE;
if (idx == PALMAS_REG_LDO8)
pdata->enable_ldo8_tracking = of_property_read_bool(
- ddata->palmas_matches[idx].of_node,
- "ti,enable-ldo8-tracking");
+ np, "ti,enable-ldo8-tracking");
}
pdata->ldo6_vibrator = of_property_read_bool(node, "ti,ldo6-vibrator");
+
+ return 0;
}
static const struct of_device_id of_palmas_match_tbl[] = {
@@ -1628,7 +1635,9 @@ static int palmas_regulators_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmic);
pmic->palmas->pmic_ddata = driver_data;
- palmas_dt_to_pdata(&pdev->dev, node, pdata, driver_data);
+ ret = palmas_dt_to_pdata(&pdev->dev, node, pdata, driver_data);
+ if (ret)
+ return ret;
ret = palmas_smps_read(palmas, PALMAS_SMPS_CTRL, &reg);
if (ret)
diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
new file mode 100644
index 000000000000..d7107566c429
--- /dev/null
+++ b/drivers/regulator/pv88080-regulator.c
@@ -0,0 +1,419 @@
+/*
+ * pv88080-regulator.c - Regulator device driver for PV88080
+ * Copyright (C) 2016 Powerventure Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include "pv88080-regulator.h"
+
+#define PV88080_MAX_REGULATORS 3
+
+/* PV88080 REGULATOR IDs */
+enum {
+ /* BUCKs */
+ PV88080_ID_BUCK1,
+ PV88080_ID_BUCK2,
+ PV88080_ID_BUCK3,
+};
+
+struct pv88080_regulator {
+ struct regulator_desc desc;
+ /* Current limiting */
+ unsigned int n_current_limits;
+ const int *current_limits;
+ unsigned int limit_mask;
+ unsigned int conf;
+ unsigned int conf2;
+ unsigned int conf5;
+};
+
+struct pv88080 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator_dev *rdev[PV88080_MAX_REGULATORS];
+};
+
+struct pv88080_buck_voltage {
+ int min_uV;
+ int max_uV;
+ int uV_step;
+};
+
+static const struct regmap_config pv88080_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+/* Current limits array (in uA) for BUCK1, BUCK2, BUCK3.
+ * Entry indexes correspond to register values.
+ */
+
+static const int pv88080_buck1_limits[] = {
+ 3230000, 5130000, 6960000, 8790000
+};
+
+static const int pv88080_buck23_limits[] = {
+ 1496000, 2393000, 3291000, 4189000
+};
+
+static const struct pv88080_buck_voltage pv88080_buck_vol[2] = {
+ {
+ .min_uV = 600000,
+ .max_uV = 1393750,
+ .uV_step = 6250,
+ },
+ {
+ .min_uV = 1400000,
+ .max_uV = 2193750,
+ .uV_step = 6250,
+ },
+};
+
+static unsigned int pv88080_buck_get_mode(struct regulator_dev *rdev)
+{
+ struct pv88080_regulator *info = rdev_get_drvdata(rdev);
+ unsigned int data;
+ int ret, mode = 0;
+
+ ret = regmap_read(rdev->regmap, info->conf, &data);
+ if (ret < 0)
+ return ret;
+
+ switch (data & PV88080_BUCK1_MODE_MASK) {
+ case PV88080_BUCK_MODE_SYNC:
+ mode = REGULATOR_MODE_FAST;
+ break;
+ case PV88080_BUCK_MODE_AUTO:
+ mode = REGULATOR_MODE_NORMAL;
+ break;
+ case PV88080_BUCK_MODE_SLEEP:
+ mode = REGULATOR_MODE_STANDBY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mode;
+}
+
+static int pv88080_buck_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct pv88080_regulator *info = rdev_get_drvdata(rdev);
+ int val = 0;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = PV88080_BUCK_MODE_SYNC;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = PV88080_BUCK_MODE_AUTO;
+ break;
+ case REGULATOR_MODE_STANDBY:
+ val = PV88080_BUCK_MODE_SLEEP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(rdev->regmap, info->conf,
+ PV88080_BUCK1_MODE_MASK, val);
+}
+
+static int pv88080_set_current_limit(struct regulator_dev *rdev, int min,
+ int max)
+{
+ struct pv88080_regulator *info = rdev_get_drvdata(rdev);
+ int i;
+
+ /* Search downward for the highest limit inside the requested window */
+ for (i = info->n_current_limits - 1; i >= 0; i--) {
+ if (min <= info->current_limits[i]
+ && max >= info->current_limits[i]) {
+ return regmap_update_bits(rdev->regmap,
+ info->conf,
+ info->limit_mask,
+ i << PV88080_BUCK1_ILIM_SHIFT);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int pv88080_get_current_limit(struct regulator_dev *rdev)
+{
+ struct pv88080_regulator *info = rdev_get_drvdata(rdev);
+ unsigned int data;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, info->conf, &data);
+ if (ret < 0)
+ return ret;
+
+ data = (data & info->limit_mask) >> PV88080_BUCK1_ILIM_SHIFT;
+ return info->current_limits[data];
+}
+
+static struct regulator_ops pv88080_buck_ops = {
+ .get_mode = pv88080_buck_get_mode,
+ .set_mode = pv88080_buck_set_mode,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_current_limit = pv88080_set_current_limit,
+ .get_current_limit = pv88080_get_current_limit,
+};
+
+#define PV88080_BUCK(chip, regl_name, min, step, max, limits_array) \
+{\
+ .desc = {\
+ .id = chip##_ID_##regl_name,\
+ .name = __stringify(chip##_##regl_name),\
+ .of_match = of_match_ptr(#regl_name),\
+ .regulators_node = of_match_ptr("regulators"),\
+ .type = REGULATOR_VOLTAGE,\
+ .owner = THIS_MODULE,\
+ .ops = &pv88080_buck_ops,\
+ .min_uV = min, \
+ .uV_step = step, \
+ .n_voltages = ((max) - (min))/(step) + 1, \
+ .enable_reg = PV88080_REG_##regl_name##_CONF0, \
+ .enable_mask = PV88080_##regl_name##_EN, \
+ .vsel_reg = PV88080_REG_##regl_name##_CONF0, \
+ .vsel_mask = PV88080_V##regl_name##_MASK, \
+ },\
+ .current_limits = limits_array, \
+ .n_current_limits = ARRAY_SIZE(limits_array), \
+ .limit_mask = PV88080_##regl_name##_ILIM_MASK, \
+ .conf = PV88080_REG_##regl_name##_CONF1, \
+ .conf2 = PV88080_REG_##regl_name##_CONF2, \
+ .conf5 = PV88080_REG_##regl_name##_CONF5, \
+}
+
+static struct pv88080_regulator pv88080_regulator_info[] = {
+ PV88080_BUCK(PV88080, BUCK1, 600000, 6250, 1393750,
+ pv88080_buck1_limits),
+ PV88080_BUCK(PV88080, BUCK2, 600000, 6250, 1393750,
+ pv88080_buck23_limits),
+ PV88080_BUCK(PV88080, BUCK3, 600000, 6250, 1393750,
+ pv88080_buck23_limits),
+};
+
+static irqreturn_t pv88080_irq_handler(int irq, void *data)
+{
+ struct pv88080 *chip = data;
+ int i, reg_val, err, ret = IRQ_NONE;
+
+ err = regmap_read(chip->regmap, PV88080_REG_EVENT_A, &reg_val);
+ if (err < 0)
+ goto error_i2c;
+
+ if (reg_val & PV88080_E_VDD_FLT) {
+ for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
+ if (chip->rdev[i] != NULL) {
+ regulator_notifier_call_chain(chip->rdev[i],
+ REGULATOR_EVENT_UNDER_VOLTAGE,
+ NULL);
+ }
+ }
+
+ err = regmap_write(chip->regmap, PV88080_REG_EVENT_A,
+ PV88080_E_VDD_FLT);
+ if (err < 0)
+ goto error_i2c;
+
+ ret = IRQ_HANDLED;
+ }
+
+ if (reg_val & PV88080_E_OVER_TEMP) {
+ for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
+ if (chip->rdev[i] != NULL) {
+ regulator_notifier_call_chain(chip->rdev[i],
+ REGULATOR_EVENT_OVER_TEMP,
+ NULL);
+ }
+ }
+
+ err = regmap_write(chip->regmap, PV88080_REG_EVENT_A,
+ PV88080_E_OVER_TEMP);
+ if (err < 0)
+ goto error_i2c;
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+
+error_i2c:
+ dev_err(chip->dev, "I2C error : %d\n", err);
+ return IRQ_NONE;
+}
+
+/*
+ * I2C driver interface functions
+ */
+static int pv88080_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
+ struct pv88080 *chip;
+ struct regulator_config config = { };
+ int i, error, ret;
+ unsigned int conf2, conf5;
+
+ chip = devm_kzalloc(&i2c->dev, sizeof(struct pv88080), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &i2c->dev;
+ chip->regmap = devm_regmap_init_i2c(i2c, &pv88080_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ error = PTR_ERR(chip->regmap);
+ dev_err(chip->dev, "Failed to allocate register map: %d\n",
+ error);
+ return error;
+ }
+
+ i2c_set_clientdata(i2c, chip);
+
+ if (i2c->irq != 0) {
+ ret = regmap_write(chip->regmap, PV88080_REG_MASK_A, 0xFF);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to mask A reg: %d\n", ret);
+ return ret;
+ }
+ ret = regmap_write(chip->regmap, PV88080_REG_MASK_B, 0xFF);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to mask B reg: %d\n", ret);
+ return ret;
+ }
+ ret = regmap_write(chip->regmap, PV88080_REG_MASK_C, 0xFF);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to mask C reg: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+ pv88080_irq_handler,
+ IRQF_TRIGGER_LOW|IRQF_ONESHOT,
+ "pv88080", chip);
+ if (ret != 0) {
+ dev_err(chip->dev, "Failed to request IRQ: %d\n",
+ i2c->irq);
+ return ret;
+ }
+
+ ret = regmap_update_bits(chip->regmap, PV88080_REG_MASK_A,
+ PV88080_M_VDD_FLT | PV88080_M_OVER_TEMP, 0);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to update mask reg: %d\n", ret);
+ return ret;
+ }
+
+ } else {
+ dev_warn(chip->dev, "No IRQ configured\n");
+ }
+
+ config.dev = chip->dev;
+ config.regmap = chip->regmap;
+
+ for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
+ if (init_data)
+ config.init_data = &init_data[i];
+
+ ret = regmap_read(chip->regmap,
+ pv88080_regulator_info[i].conf2, &conf2);
+ if (ret < 0)
+ return ret;
+
+ conf2 = ((conf2 >> PV88080_BUCK_VDAC_RANGE_SHIFT) &
+ PV88080_BUCK_VDAC_RANGE_MASK);
+
+ ret = regmap_read(chip->regmap,
+ pv88080_regulator_info[i].conf5, &conf5);
+ if (ret < 0)
+ return ret;
+
+ conf5 = ((conf5 >> PV88080_BUCK_VRANGE_GAIN_SHIFT) &
+ PV88080_BUCK_VRANGE_GAIN_MASK);
+
+ pv88080_regulator_info[i].desc.min_uV =
+ pv88080_buck_vol[conf2].min_uV * (conf5+1);
+ pv88080_regulator_info[i].desc.uV_step =
+ pv88080_buck_vol[conf2].uV_step * (conf5+1);
+ pv88080_regulator_info[i].desc.n_voltages =
+ ((pv88080_buck_vol[conf2].max_uV * (conf5+1))
+ - (pv88080_regulator_info[i].desc.min_uV))
+ /(pv88080_regulator_info[i].desc.uV_step) + 1;
+
+ config.driver_data = (void *)&pv88080_regulator_info[i];
+ chip->rdev[i] = devm_regulator_register(chip->dev,
+ &pv88080_regulator_info[i].desc, &config);
+ if (IS_ERR(chip->rdev[i])) {
+ dev_err(chip->dev,
+ "Failed to register PV88080 regulator\n");
+ return PTR_ERR(chip->rdev[i]);
+ }
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id pv88080_i2c_id[] = {
+ {"pv88080", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, pv88080_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id pv88080_dt_ids[] = {
+ { .compatible = "pvs,pv88080", .data = &pv88080_i2c_id[0] },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pv88080_dt_ids);
+#endif
+
+static struct i2c_driver pv88080_regulator_driver = {
+ .driver = {
+ .name = "pv88080",
+ .of_match_table = of_match_ptr(pv88080_dt_ids),
+ },
+ .probe = pv88080_i2c_probe,
+ .id_table = pv88080_i2c_id,
+};
+
+module_i2c_driver(pv88080_regulator_driver);
+
+MODULE_AUTHOR("James Ban <James.Ban.opensource@diasemi.com>");
+MODULE_DESCRIPTION("Regulator device driver for Powerventure PV88080");
+MODULE_LICENSE("GPL");
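During probe the pv88080 driver rescales each BUCK's description from the VDAC range bit (CONF2) and the range-gain bit (CONF5), so min_uV, uV_step and n_voltages all depend on both. A worked example assuming range 1 (600000..1393750 uV, 6250 uV steps) and a gain field of 1, i.e. a multiplier of 2 — the numbers are purely illustrative:

#include <stdio.h>

int main(void)
{
        /* Range-1 parameters from pv88080_buck_vol[0] and conf5 == 1 (gain x2),
         * mirroring the probe arithmetic above. */
        int min_uV = 600000, max_uV = 1393750, step_uV = 6250;
        int gain = 1 + 1;

        int eff_min  = min_uV * gain;
        int eff_step = step_uV * gain;
        int n_volt   = (max_uV * gain - eff_min) / eff_step + 1;

        /* Prints: min=1200000 uV step=12500 uV n_voltages=128 */
        printf("min=%d uV step=%d uV n_voltages=%d\n", eff_min, eff_step, n_volt);
        return 0;
}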
diff --git a/drivers/regulator/pv88080-regulator.h b/drivers/regulator/pv88080-regulator.h
new file mode 100644
index 000000000000..5e9afde606f4
--- /dev/null
+++ b/drivers/regulator/pv88080-regulator.h
@@ -0,0 +1,92 @@
+/*
+ * pv88080-regulator.h - Regulator definitions for PV88080
+ * Copyright (C) 2016 Powerventure Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PV88080_REGISTERS_H__
+#define __PV88080_REGISTERS_H__
+
+/* System Control and Event Registers */
+#define PV88080_REG_EVENT_A 0x04
+#define PV88080_REG_MASK_A 0x09
+#define PV88080_REG_MASK_B 0x0a
+#define PV88080_REG_MASK_C 0x0b
+
+/* Regulator Registers */
+#define PV88080_REG_BUCK1_CONF0 0x27
+#define PV88080_REG_BUCK1_CONF1 0x28
+#define PV88080_REG_BUCK1_CONF2 0x59
+#define PV88080_REG_BUCK1_CONF5 0x5c
+#define PV88080_REG_BUCK2_CONF0 0x29
+#define PV88080_REG_BUCK2_CONF1 0x2a
+#define PV88080_REG_BUCK2_CONF2 0x61
+#define PV88080_REG_BUCK2_CONF5 0x64
+#define PV88080_REG_BUCK3_CONF0 0x2b
+#define PV88080_REG_BUCK3_CONF1 0x2c
+#define PV88080_REG_BUCK3_CONF2 0x69
+#define PV88080_REG_BUCK3_CONF5 0x6c
+
+/* PV88080_REG_EVENT_A (addr=0x04) */
+#define PV88080_E_VDD_FLT 0x01
+#define PV88080_E_OVER_TEMP 0x02
+
+/* PV88080_REG_MASK_A (addr=0x09) */
+#define PV88080_M_VDD_FLT 0x01
+#define PV88080_M_OVER_TEMP 0x02
+
+/* PV88080_REG_BUCK1_CONF0 (addr=0x27) */
+#define PV88080_BUCK1_EN 0x80
+#define PV88080_VBUCK1_MASK 0x7F
+/* PV88080_REG_BUCK2_CONF0 (addr=0x29) */
+#define PV88080_BUCK2_EN 0x80
+#define PV88080_VBUCK2_MASK 0x7F
+/* PV88080_REG_BUCK3_CONF0 (addr=0x2b) */
+#define PV88080_BUCK3_EN 0x80
+#define PV88080_VBUCK3_MASK 0x7F
+
+/* PV88080_REG_BUCK1_CONF1 (addr=0x28) */
+#define PV88080_BUCK1_ILIM_SHIFT 2
+#define PV88080_BUCK1_ILIM_MASK 0x0C
+#define PV88080_BUCK1_MODE_MASK 0x03
+
+/* PV88080_REG_BUCK2_CONF1 (addr=0x2a) */
+#define PV88080_BUCK2_ILIM_SHIFT 2
+#define PV88080_BUCK2_ILIM_MASK 0x0C
+#define PV88080_BUCK2_MODE_MASK 0x03
+
+/* PV88080_REG_BUCK3_CONF1 (addr=0x2c) */
+#define PV88080_BUCK3_ILIM_SHIFT 2
+#define PV88080_BUCK3_ILIM_MASK 0x0C
+#define PV88080_BUCK3_MODE_MASK 0x03
+
+#define PV88080_BUCK_MODE_SLEEP 0x00
+#define PV88080_BUCK_MODE_AUTO 0x01
+#define PV88080_BUCK_MODE_SYNC 0x02
+
+/* PV88080_REG_BUCK2_CONF2 (addr=0x61) */
+/* PV88080_REG_BUCK3_CONF2 (addr=0x69) */
+#define PV88080_BUCK_VDAC_RANGE_SHIFT 7
+#define PV88080_BUCK_VDAC_RANGE_MASK 0x01
+
+#define PV88080_BUCK_VDAC_RANGE_1 0x00
+#define PV88080_BUCK_VDAC_RANGE_2 0x01
+
+/* PV88080_REG_BUCK2_CONF5 (addr=0x64) */
+/* PV88080_REG_BUCK3_CONF5 (addr=0x6c) */
+#define PV88080_BUCK_VRANGE_GAIN_SHIFT 0
+#define PV88080_BUCK_VRANGE_GAIN_MASK 0x01
+
+#define PV88080_BUCK_VRANGE_GAIN_1 0x00
+#define PV88080_BUCK_VRANGE_GAIN_2 0x01
+
+#endif /* __PV88080_REGISTERS_H__ */
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 4689d62f4841..fafa3488e960 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -59,18 +59,18 @@ static int pwm_regulator_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
- unsigned int pwm_reg_period;
+ struct pwm_args pargs;
int dutycycle;
int ret;
- pwm_reg_period = pwm_get_period(drvdata->pwm);
+ pwm_get_args(drvdata->pwm, &pargs);
- dutycycle = (pwm_reg_period *
+ dutycycle = (pargs.period *
drvdata->duty_cycle_table[selector].dutycycle) / 100;
- ret = pwm_config(drvdata->pwm, dutycycle, pwm_reg_period);
+ ret = pwm_config(drvdata->pwm, dutycycle, pargs.period);
if (ret) {
- dev_err(&rdev->dev, "Failed to configure PWM\n");
+ dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
return ret;
}
@@ -113,18 +113,6 @@ static int pwm_regulator_is_enabled(struct regulator_dev *dev)
return pwm_is_enabled(drvdata->pwm);
}
-/**
- * Continuous voltage call-backs
- */
-static int pwm_voltage_to_duty_cycle_percentage(struct regulator_dev *rdev, int req_uV)
-{
- int min_uV = rdev->constraints->min_uV;
- int max_uV = rdev->constraints->max_uV;
- int diff = max_uV - min_uV;
-
- return ((req_uV * 100) - (min_uV * 100)) / diff;
-}
-
static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
@@ -138,21 +126,42 @@ static int pwm_regulator_set_voltage(struct regulator_dev *rdev,
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
unsigned int ramp_delay = rdev->constraints->ramp_delay;
- unsigned int period = pwm_get_period(drvdata->pwm);
- int duty_cycle;
+ struct pwm_args pargs;
+ unsigned int req_diff = min_uV - rdev->constraints->min_uV;
+ unsigned int diff;
+ unsigned int duty_pulse;
+ u64 req_period;
+ u32 rem;
int ret;
- duty_cycle = pwm_voltage_to_duty_cycle_percentage(rdev, min_uV);
+ pwm_get_args(drvdata->pwm, &pargs);
+ diff = rdev->constraints->max_uV - rdev->constraints->min_uV;
+
+ /* First try to find a duty-cycle time that is an exact factor of the
+ * PWM period. If (request_diff_to_min * pwm_period) divides evenly by
+ * voltage_range_diff, such a duty-cycle time exists, and using it
+ * brings the output voltage closer to the requested value because no
+ * precision is lost in the calculation.
+ */
+ req_period = req_diff * pargs.period;
+ div_u64_rem(req_period, diff, &rem);
+ if (!rem) {
+ do_div(req_period, diff);
+ duty_pulse = (unsigned int)req_period;
+ } else {
+ duty_pulse = (pargs.period / 100) * ((req_diff * 100) / diff);
+ }
- ret = pwm_config(drvdata->pwm, (period / 100) * duty_cycle, period);
+ ret = pwm_config(drvdata->pwm, duty_pulse, pargs.period);
if (ret) {
- dev_err(&rdev->dev, "Failed to configure PWM\n");
+ dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
return ret;
}
ret = pwm_enable(drvdata->pwm);
if (ret) {
- dev_err(&rdev->dev, "Failed to enable PWM\n");
+ dev_err(&rdev->dev, "Failed to enable PWM: %d\n", ret);
return ret;
}
drvdata->volt_uV = min_uV;
@@ -200,8 +209,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
if ((length < sizeof(*duty_cycle_table)) ||
(length % sizeof(*duty_cycle_table))) {
- dev_err(&pdev->dev,
- "voltage-table length(%d) is invalid\n",
+ dev_err(&pdev->dev, "voltage-table length(%d) is invalid\n",
length);
return -EINVAL;
}
@@ -214,7 +222,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
(u32 *)duty_cycle_table,
length / sizeof(u32));
if (ret) {
- dev_err(&pdev->dev, "Failed to read voltage-table\n");
+ dev_err(&pdev->dev, "Failed to read voltage-table: %d\n", ret);
return ret;
}
@@ -277,16 +285,24 @@ static int pwm_regulator_probe(struct platform_device *pdev)
drvdata->pwm = devm_pwm_get(&pdev->dev, NULL);
if (IS_ERR(drvdata->pwm)) {
- dev_err(&pdev->dev, "Failed to get PWM\n");
- return PTR_ERR(drvdata->pwm);
+ ret = PTR_ERR(drvdata->pwm);
+ dev_err(&pdev->dev, "Failed to get PWM: %d\n", ret);
+ return ret;
}
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to the
+ * atomic PWM API.
+ */
+ pwm_apply_args(drvdata->pwm);
+
regulator = devm_regulator_register(&pdev->dev,
&drvdata->desc, &config);
if (IS_ERR(regulator)) {
- dev_err(&pdev->dev, "Failed to register regulator %s\n",
- drvdata->desc.name);
- return PTR_ERR(regulator);
+ ret = PTR_ERR(regulator);
+ dev_err(&pdev->dev, "Failed to register regulator %s: %d\n",
+ drvdata->desc.name, ret);
+ return ret;
}
return 0;
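The rewritten pwm_regulator_set_voltage() first checks whether req_diff * period divides evenly by the constraint span; only when it does not does it fall back to the coarser percentage calculation. A worked example with assumed constraints and period — all values here are made up for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Assumed regulator constraints and PWM period. */
        unsigned int min_uV = 1000000, max_uV = 1500000, period_ns = 5000;
        unsigned int req_uV = 1100000;

        unsigned int req_diff = req_uV - min_uV;        /* 100000 */
        unsigned int diff = max_uV - min_uV;            /* 500000 */
        uint64_t req_period = (uint64_t)req_diff * period_ns;
        unsigned int duty_pulse;

        if (req_period % diff == 0)                     /* exact path */
                duty_pulse = (unsigned int)(req_period / diff);
        else                                            /* percentage fallback */
                duty_pulse = (period_ns / 100) * ((req_diff * 100) / diff);

        /* Prints: duty_pulse = 1000 ns of 5000 ns */
        printf("duty_pulse = %u ns of %u ns\n", duty_pulse, period_ns);
        return 0;
}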
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 88a5dc88badc..84cce21e98cd 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -246,6 +246,7 @@ enum spmi_common_control_register_index {
/* Minimum voltage stepper delay for each step. */
#define SPMI_FTSMPS_STEP_DELAY 8
+#define SPMI_DEFAULT_STEP_DELAY 20
/*
* The ratio SPMI_FTSMPS_STEP_MARGIN_NUM/SPMI_FTSMPS_STEP_MARGIN_DEN is used to
@@ -254,13 +255,6 @@ enum spmi_common_control_register_index {
#define SPMI_FTSMPS_STEP_MARGIN_NUM 4
#define SPMI_FTSMPS_STEP_MARGIN_DEN 5
-/*
- * This voltage in uV is returned by get_voltage functions when there is no way
- * to determine the current voltage level. It is needed because the regulator
- * framework treats a 0 uV voltage as an error.
- */
-#define VOLTAGE_UNKNOWN 1
-
/* VSET value to decide the range of ULT SMPS */
#define ULT_SMPS_RANGE_SPLIT 0x60
@@ -539,12 +533,12 @@ static int spmi_regulator_common_disable(struct regulator_dev *rdev)
}
static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
- int min_uV, int max_uV, u8 *range_sel, u8 *voltage_sel,
- unsigned *selector)
+ int min_uV, int max_uV)
{
const struct spmi_voltage_range *range;
int uV = min_uV;
int lim_min_uV, lim_max_uV, i, range_id, range_max_uV;
+ int selector, voltage_sel;
/* Check if request voltage is outside of physically settable range. */
lim_min_uV = vreg->set_points->range[0].set_point_min_uV;
@@ -570,14 +564,13 @@ static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
range_id = i;
range = &vreg->set_points->range[range_id];
- *range_sel = range->range_sel;
/*
* Force uV to be an allowed set point by applying a ceiling function to
* the uV value.
*/
- *voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
- uV = *voltage_sel * range->step_uV + range->min_uV;
+ voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
+ uV = voltage_sel * range->step_uV + range->min_uV;
if (uV > max_uV) {
dev_err(vreg->dev,
@@ -587,12 +580,48 @@ static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
return -EINVAL;
}
- *selector = 0;
+ selector = 0;
for (i = 0; i < range_id; i++)
- *selector += vreg->set_points->range[i].n_voltages;
- *selector += (uV - range->set_point_min_uV) / range->step_uV;
+ selector += vreg->set_points->range[i].n_voltages;
+ selector += (uV - range->set_point_min_uV) / range->step_uV;
- return 0;
+ return selector;
+}
+
+static int spmi_sw_selector_to_hw(struct spmi_regulator *vreg,
+ unsigned selector, u8 *range_sel,
+ u8 *voltage_sel)
+{
+ const struct spmi_voltage_range *range, *end;
+
+ range = vreg->set_points->range;
+ end = range + vreg->set_points->count;
+
+ for (; range < end; range++) {
+ if (selector < range->n_voltages) {
+ *voltage_sel = selector;
+ *range_sel = range->range_sel;
+ return 0;
+ }
+
+ selector -= range->n_voltages;
+ }
+
+ return -EINVAL;
+}
+
+static int spmi_hw_selector_to_sw(struct spmi_regulator *vreg, u8 hw_sel,
+ const struct spmi_voltage_range *range)
+{
+ int sw_sel = hw_sel;
+ const struct spmi_voltage_range *r = vreg->set_points->range;
+
+ while (r != range) {
+ sw_sel += r->n_voltages;
+ r++;
+ }
+
+ return sw_sel;
}
static const struct spmi_voltage_range *
@@ -614,12 +643,11 @@ spmi_regulator_find_range(struct spmi_regulator *vreg)
}
static int spmi_regulator_select_voltage_same_range(struct spmi_regulator *vreg,
- int min_uV, int max_uV, u8 *range_sel, u8 *voltage_sel,
- unsigned *selector)
+ int min_uV, int max_uV)
{
const struct spmi_voltage_range *range;
int uV = min_uV;
- int i;
+ int i, selector;
range = spmi_regulator_find_range(vreg);
if (!range)
@@ -637,8 +665,8 @@ static int spmi_regulator_select_voltage_same_range(struct spmi_regulator *vreg,
* Force uV to be an allowed set point by applying a ceiling function to
* the uV value.
*/
- *voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
- uV = *voltage_sel * range->step_uV + range->min_uV;
+ uV = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
+ uV = uV * range->step_uV + range->min_uV;
if (uV > max_uV) {
/*
@@ -648,43 +676,49 @@ static int spmi_regulator_select_voltage_same_range(struct spmi_regulator *vreg,
goto different_range;
}
- *selector = 0;
+ selector = 0;
for (i = 0; i < vreg->set_points->count; i++) {
if (uV >= vreg->set_points->range[i].set_point_min_uV
&& uV <= vreg->set_points->range[i].set_point_max_uV) {
- *selector +=
+ selector +=
(uV - vreg->set_points->range[i].set_point_min_uV)
/ vreg->set_points->range[i].step_uV;
break;
}
- *selector += vreg->set_points->range[i].n_voltages;
+ selector += vreg->set_points->range[i].n_voltages;
}
- if (*selector >= vreg->set_points->n_voltages)
+ if (selector >= vreg->set_points->n_voltages)
goto different_range;
- return 0;
+ return selector;
different_range:
- return spmi_regulator_select_voltage(vreg, min_uV, max_uV,
- range_sel, voltage_sel, selector);
+ return spmi_regulator_select_voltage(vreg, min_uV, max_uV);
}
-static int spmi_regulator_common_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+static int spmi_regulator_common_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- int ret;
- u8 buf[2];
- u8 range_sel, voltage_sel;
/*
* Favor staying in the current voltage range if possible. This avoids
* voltage spikes that occur when changing the voltage range.
*/
- ret = spmi_regulator_select_voltage_same_range(vreg, min_uV, max_uV,
- &range_sel, &voltage_sel, selector);
+ return spmi_regulator_select_voltage_same_range(vreg, min_uV, max_uV);
+}
+
+static int
+spmi_regulator_common_set_voltage(struct regulator_dev *rdev, unsigned selector)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ int ret;
+ u8 buf[2];
+ u8 range_sel, voltage_sel;
+
+ ret = spmi_sw_selector_to_hw(vreg, selector, &range_sel, &voltage_sel);
if (ret)
return ret;
@@ -719,24 +753,24 @@ static int spmi_regulator_common_get_voltage(struct regulator_dev *rdev)
range = spmi_regulator_find_range(vreg);
if (!range)
- return VOLTAGE_UNKNOWN;
+ return -EINVAL;
- return range->step_uV * voltage_sel + range->min_uV;
+ return spmi_hw_selector_to_sw(vreg, voltage_sel, range);
}
-static int spmi_regulator_single_range_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+static int spmi_regulator_single_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- int ret;
- u8 range_sel, sel;
- ret = spmi_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
- &sel, selector);
- if (ret) {
- dev_err(vreg->dev, "could not set voltage, ret=%d\n", ret);
- return ret;
- }
+ return spmi_regulator_select_voltage(vreg, min_uV, max_uV);
+}
+
+static int spmi_regulator_single_range_set_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ u8 sel = selector;
/*
* Certain types of regulators do not have a range select register so
@@ -748,27 +782,24 @@ static int spmi_regulator_single_range_set_voltage(struct regulator_dev *rdev,
static int spmi_regulator_single_range_get_voltage(struct regulator_dev *rdev)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- const struct spmi_voltage_range *range = vreg->set_points->range;
- u8 voltage_sel;
+ u8 selector;
+ int ret;
- spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_SET, &voltage_sel, 1);
+ ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_SET, &selector, 1);
+ if (ret)
+ return ret;
- return range->step_uV * voltage_sel + range->min_uV;
+ return selector;
}
static int spmi_regulator_ult_lo_smps_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+ unsigned selector)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
int ret;
u8 range_sel, voltage_sel;
- /*
- * Favor staying in the current voltage range if possible. This avoids
- * voltage spikes that occur when changing the voltage range.
- */
- ret = spmi_regulator_select_voltage_same_range(vreg, min_uV, max_uV,
- &range_sel, &voltage_sel, selector);
+ ret = spmi_sw_selector_to_hw(vreg, selector, &range_sel, &voltage_sel);
if (ret)
return ret;
@@ -783,7 +814,7 @@ static int spmi_regulator_ult_lo_smps_set_voltage(struct regulator_dev *rdev,
voltage_sel |= ULT_SMPS_RANGE_SPLIT;
return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_VOLTAGE_SET,
- voltage_sel, 0xff);
+ voltage_sel, 0xff);
}
static int spmi_regulator_ult_lo_smps_get_voltage(struct regulator_dev *rdev)
@@ -796,12 +827,12 @@ static int spmi_regulator_ult_lo_smps_get_voltage(struct regulator_dev *rdev)
range = spmi_regulator_find_range(vreg);
if (!range)
- return VOLTAGE_UNKNOWN;
+ return -EINVAL;
if (range->range_sel == 1)
voltage_sel &= ~ULT_SMPS_RANGE_SPLIT;
- return range->step_uV * voltage_sel + range->min_uV;
+ return spmi_hw_selector_to_sw(vreg, voltage_sel, range);
}
static int spmi_regulator_common_list_voltage(struct regulator_dev *rdev,
@@ -1007,8 +1038,10 @@ static struct regulator_ops spmi_smps_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_common_set_voltage,
- .get_voltage = spmi_regulator_common_get_voltage,
+ .set_voltage_sel = spmi_regulator_common_set_voltage,
+ .set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
+ .get_voltage_sel = spmi_regulator_common_get_voltage,
+ .map_voltage = spmi_regulator_common_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1020,8 +1053,9 @@ static struct regulator_ops spmi_ldo_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_common_set_voltage,
- .get_voltage = spmi_regulator_common_get_voltage,
+ .set_voltage_sel = spmi_regulator_common_set_voltage,
+ .get_voltage_sel = spmi_regulator_common_get_voltage,
+ .map_voltage = spmi_regulator_common_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1036,8 +1070,9 @@ static struct regulator_ops spmi_ln_ldo_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_common_set_voltage,
- .get_voltage = spmi_regulator_common_get_voltage,
+ .set_voltage_sel = spmi_regulator_common_set_voltage,
+ .get_voltage_sel = spmi_regulator_common_get_voltage,
+ .map_voltage = spmi_regulator_common_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_bypass = spmi_regulator_common_set_bypass,
.get_bypass = spmi_regulator_common_get_bypass,
@@ -1056,8 +1091,9 @@ static struct regulator_ops spmi_boost_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_single_range_set_voltage,
- .get_voltage = spmi_regulator_single_range_get_voltage,
+ .set_voltage_sel = spmi_regulator_single_range_set_voltage,
+ .get_voltage_sel = spmi_regulator_single_range_get_voltage,
+ .map_voltage = spmi_regulator_single_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_input_current_limit = spmi_regulator_set_ilim,
};
@@ -1066,9 +1102,10 @@ static struct regulator_ops spmi_ftsmps_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_common_set_voltage,
+ .set_voltage_sel = spmi_regulator_common_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
- .get_voltage = spmi_regulator_common_get_voltage,
+ .get_voltage_sel = spmi_regulator_common_get_voltage,
+ .map_voltage = spmi_regulator_common_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1080,8 +1117,9 @@ static struct regulator_ops spmi_ult_lo_smps_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_ult_lo_smps_set_voltage,
- .get_voltage = spmi_regulator_ult_lo_smps_get_voltage,
+ .set_voltage_sel = spmi_regulator_ult_lo_smps_set_voltage,
+ .set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
+ .get_voltage_sel = spmi_regulator_ult_lo_smps_get_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1093,8 +1131,10 @@ static struct regulator_ops spmi_ult_ho_smps_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_single_range_set_voltage,
- .get_voltage = spmi_regulator_single_range_get_voltage,
+ .set_voltage_sel = spmi_regulator_single_range_set_voltage,
+ .set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
+ .get_voltage_sel = spmi_regulator_single_range_get_voltage,
+ .map_voltage = spmi_regulator_single_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1106,8 +1146,9 @@ static struct regulator_ops spmi_ult_ldo_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_single_range_set_voltage,
- .get_voltage = spmi_regulator_single_range_get_voltage,
+ .set_voltage_sel = spmi_regulator_single_range_set_voltage,
+ .get_voltage_sel = spmi_regulator_single_range_get_voltage,
+ .map_voltage = spmi_regulator_single_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1201,7 +1242,7 @@ static int spmi_regulator_match(struct spmi_regulator *vreg, u16 force_type)
ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_DIG_MAJOR_REV, version,
ARRAY_SIZE(version));
if (ret) {
- dev_err(vreg->dev, "could not read version registers\n");
+ dev_dbg(vreg->dev, "could not read version registers\n");
return ret;
}
dig_major_rev = version[SPMI_COMMON_REG_DIG_MAJOR_REV
@@ -1245,11 +1286,11 @@ found:
return 0;
}
-static int spmi_regulator_ftsmps_init_slew_rate(struct spmi_regulator *vreg)
+static int spmi_regulator_init_slew_rate(struct spmi_regulator *vreg)
{
int ret;
u8 reg = 0;
- int step, delay, slew_rate;
+ int step, delay, slew_rate, step_delay;
const struct spmi_voltage_range *range;
ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_STEP_CTRL, &reg, 1);
@@ -1262,6 +1303,15 @@ static int spmi_regulator_ftsmps_init_slew_rate(struct spmi_regulator *vreg)
if (!range)
return -EINVAL;
+ switch (vreg->logical_type) {
+ case SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS:
+ step_delay = SPMI_FTSMPS_STEP_DELAY;
+ break;
+ default:
+ step_delay = SPMI_DEFAULT_STEP_DELAY;
+ break;
+ }
+
step = reg & SPMI_FTSMPS_STEP_CTRL_STEP_MASK;
step >>= SPMI_FTSMPS_STEP_CTRL_STEP_SHIFT;
@@ -1270,7 +1320,7 @@ static int spmi_regulator_ftsmps_init_slew_rate(struct spmi_regulator *vreg)
/* slew_rate has units of uV/us */
slew_rate = SPMI_FTSMPS_CLOCK_RATE * range->step_uV * (1 << step);
- slew_rate /= 1000 * (SPMI_FTSMPS_STEP_DELAY << delay);
+ slew_rate /= 1000 * (step_delay << delay);
slew_rate *= SPMI_FTSMPS_STEP_MARGIN_NUM;
slew_rate /= SPMI_FTSMPS_STEP_MARGIN_DEN;
@@ -1411,10 +1461,16 @@ static int spmi_regulator_of_parse(struct device_node *node,
return ret;
}
- if (vreg->logical_type == SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS) {
- ret = spmi_regulator_ftsmps_init_slew_rate(vreg);
+ switch (vreg->logical_type) {
+ case SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_SMPS:
+ ret = spmi_regulator_init_slew_rate(vreg);
if (ret)
return ret;
+ default:
+ break;
}
if (vreg->logical_type != SPMI_REGULATOR_LOGICAL_TYPE_VS)
@@ -1510,10 +1566,61 @@ static const struct spmi_regulator_data pm8916_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pm8994_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0x1d00, "vdd_s4", },
+ { "s5", 0x2000, "vdd_s5", },
+ { "s6", 0x2300, "vdd_s6", },
+ { "s7", 0x2600, "vdd_s7", },
+ { "s8", 0x2900, "vdd_s8", },
+ { "s9", 0x2c00, "vdd_s9", },
+ { "s10", 0x2f00, "vdd_s10", },
+ { "s11", 0x3200, "vdd_s11", },
+ { "s12", 0x3500, "vdd_s12", },
+ { "l1", 0x4000, "vdd_l1", },
+ { "l2", 0x4100, "vdd_l2_l26_l28", },
+ { "l3", 0x4200, "vdd_l3_l11", },
+ { "l4", 0x4300, "vdd_l4_l27_l31", },
+ { "l5", 0x4400, "vdd_l5_l7", },
+ { "l6", 0x4500, "vdd_l6_l12_l32", },
+ { "l7", 0x4600, "vdd_l5_l7", },
+ { "l8", 0x4700, "vdd_l8_l16_l30", },
+ { "l9", 0x4800, "vdd_l9_l10_l18_l22", },
+ { "l10", 0x4900, "vdd_l9_l10_l18_l22", },
+ { "l11", 0x4a00, "vdd_l3_l11", },
+ { "l12", 0x4b00, "vdd_l6_l12_l32", },
+ { "l13", 0x4c00, "vdd_l13_l19_l23_l24", },
+ { "l14", 0x4d00, "vdd_l14_l15", },
+ { "l15", 0x4e00, "vdd_l14_l15", },
+ { "l16", 0x4f00, "vdd_l8_l16_l30", },
+ { "l17", 0x5000, "vdd_l17_l29", },
+ { "l18", 0x5100, "vdd_l9_l10_l18_l22", },
+ { "l19", 0x5200, "vdd_l13_l19_l23_l24", },
+ { "l20", 0x5300, "vdd_l20_l21", },
+ { "l21", 0x5400, "vdd_l20_l21", },
+ { "l22", 0x5500, "vdd_l9_l10_l18_l22", },
+ { "l23", 0x5600, "vdd_l13_l19_l23_l24", },
+ { "l24", 0x5700, "vdd_l13_l19_l23_l24", },
+ { "l25", 0x5800, "vdd_l25", },
+ { "l26", 0x5900, "vdd_l2_l26_l28", },
+ { "l27", 0x5a00, "vdd_l4_l27_l31", },
+ { "l28", 0x5b00, "vdd_l2_l26_l28", },
+ { "l29", 0x5c00, "vdd_l17_l29", },
+ { "l30", 0x5d00, "vdd_l8_l16_l30", },
+ { "l31", 0x5e00, "vdd_l4_l27_l31", },
+ { "l32", 0x5f00, "vdd_l6_l12_l32", },
+ { "lvs1", 0x8000, "vdd_lvs_1_2", },
+ { "lvs2", 0x8100, "vdd_lvs_1_2", },
+ { }
+};
+
static const struct of_device_id qcom_spmi_regulator_match[] = {
{ .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators },
{ .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators },
{ .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators },
+ { .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_spmi_regulator_match);
@@ -1573,7 +1680,7 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
ret = spmi_regulator_match(vreg, reg->force_type);
if (ret)
- goto err;
+ continue;
config.dev = dev;
config.driver_data = vreg;
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index d86a3dcd61e2..40d07ba036e7 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -55,6 +55,42 @@
/* max steps for increase voltage of Buck1/2, equal 100mv*/
#define MAX_STEPS_ONE_TIME 8
+#define RK8XX_DESC(_id, _match, _supply, _min, _max, _step, _vreg, \
+ _vmask, _ereg, _emask, _etime) \
+ [_id] = { \
+ .name = (_match), \
+ .supply_name = (_supply), \
+ .of_match = of_match_ptr(_match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .id = (_id), \
+ .n_voltages = (((_max) - (_min)) / (_step) + 1), \
+ .owner = THIS_MODULE, \
+ .min_uV = (_min) * 1000, \
+ .uV_step = (_step) * 1000, \
+ .vsel_reg = (_vreg), \
+ .vsel_mask = (_vmask), \
+ .enable_reg = (_ereg), \
+ .enable_mask = (_emask), \
+ .enable_time = (_etime), \
+ .ops = &rk808_reg_ops, \
+ }
+
+#define RK8XX_DESC_SWITCH(_id, _match, _supply, _ereg, _emask) \
+ [_id] = { \
+ .name = (_match), \
+ .supply_name = (_supply), \
+ .of_match = of_match_ptr(_match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .id = (_id), \
+ .enable_reg = (_ereg), \
+ .enable_mask = (_emask), \
+ .owner = THIS_MODULE, \
+ .ops = &rk808_switch_ops \
+ }
+
+
struct rk808_regulator_data {
struct gpio_desc *dvs_gpio[2];
};
@@ -66,27 +102,11 @@ static const int rk808_buck_config_regs[] = {
RK808_BUCK4_CONFIG_REG,
};
-static const struct regulator_linear_range rk808_buck_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(712500, 0, 63, 12500),
-};
-
-static const struct regulator_linear_range rk808_buck4_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(1800000, 0, 15, 100000),
-};
-
-static const struct regulator_linear_range rk808_ldo_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(1800000, 0, 16, 100000),
-};
-
static const struct regulator_linear_range rk808_ldo3_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(800000, 0, 13, 100000),
REGULATOR_LINEAR_RANGE(2500000, 15, 15, 0),
};
-static const struct regulator_linear_range rk808_ldo6_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(800000, 0, 17, 100000),
-};
-
static int rk808_buck1_2_get_voltage_sel_regmap(struct regulator_dev *rdev)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
@@ -242,6 +262,21 @@ static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
static int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
{
unsigned int reg;
+ int sel = regulator_map_voltage_linear(rdev, uv, uv);
+
+ if (sel < 0)
+ return -EINVAL;
+
+ reg = rdev->desc->vsel_reg + RK808_SLP_REG_OFFSET;
+
+ return regmap_update_bits(rdev->regmap, reg,
+ rdev->desc->vsel_mask,
+ sel);
+}
+
+static int rk808_set_suspend_voltage_range(struct regulator_dev *rdev, int uv)
+{
+ unsigned int reg;
int sel = regulator_map_voltage_linear_range(rdev, uv, uv);
if (sel < 0)
@@ -277,8 +312,8 @@ static int rk808_set_suspend_disable(struct regulator_dev *rdev)
}
static struct regulator_ops rk808_buck1_2_ops = {
- .list_voltage = regulator_list_voltage_linear_range,
- .map_voltage = regulator_map_voltage_linear_range,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = rk808_buck1_2_get_voltage_sel_regmap,
.set_voltage_sel = rk808_buck1_2_set_voltage_sel,
.set_voltage_time_sel = rk808_buck1_2_set_voltage_time_sel,
@@ -292,6 +327,19 @@ static struct regulator_ops rk808_buck1_2_ops = {
};
static struct regulator_ops rk808_reg_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_suspend_voltage = rk808_set_suspend_voltage,
+ .set_suspend_enable = rk808_set_suspend_enable,
+ .set_suspend_disable = rk808_set_suspend_disable,
+};
+
+static struct regulator_ops rk808_reg_ops_ranges = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -299,7 +347,7 @@ static struct regulator_ops rk808_reg_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
- .set_suspend_voltage = rk808_set_suspend_voltage,
+ .set_suspend_voltage = rk808_set_suspend_voltage_range,
.set_suspend_enable = rk808_set_suspend_enable,
.set_suspend_disable = rk808_set_suspend_disable,
};
@@ -316,12 +364,14 @@ static const struct regulator_desc rk808_reg[] = {
{
.name = "DCDC_REG1",
.supply_name = "vcc1",
+ .of_match = of_match_ptr("DCDC_REG1"),
+ .regulators_node = of_match_ptr("regulators"),
.id = RK808_ID_DCDC1,
.ops = &rk808_buck1_2_ops,
.type = REGULATOR_VOLTAGE,
+ .min_uV = 712500,
+ .uV_step = 12500,
.n_voltages = 64,
- .linear_ranges = rk808_buck_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_buck_voltage_ranges),
.vsel_reg = RK808_BUCK1_ON_VSEL_REG,
.vsel_mask = RK808_BUCK_VSEL_MASK,
.enable_reg = RK808_DCDC_EN_REG,
@@ -330,12 +380,14 @@ static const struct regulator_desc rk808_reg[] = {
}, {
.name = "DCDC_REG2",
.supply_name = "vcc2",
+ .of_match = of_match_ptr("DCDC_REG2"),
+ .regulators_node = of_match_ptr("regulators"),
.id = RK808_ID_DCDC2,
.ops = &rk808_buck1_2_ops,
.type = REGULATOR_VOLTAGE,
+ .min_uV = 712500,
+ .uV_step = 12500,
.n_voltages = 64,
- .linear_ranges = rk808_buck_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_buck_voltage_ranges),
.vsel_reg = RK808_BUCK2_ON_VSEL_REG,
.vsel_mask = RK808_BUCK_VSEL_MASK,
.enable_reg = RK808_DCDC_EN_REG,
@@ -344,6 +396,8 @@ static const struct regulator_desc rk808_reg[] = {
}, {
.name = "DCDC_REG3",
.supply_name = "vcc3",
+ .of_match = of_match_ptr("DCDC_REG3"),
+ .regulators_node = of_match_ptr("regulators"),
.id = RK808_ID_DCDC3,
.ops = &rk808_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -351,55 +405,23 @@ static const struct regulator_desc rk808_reg[] = {
.enable_reg = RK808_DCDC_EN_REG,
.enable_mask = BIT(2),
.owner = THIS_MODULE,
- }, {
- .name = "DCDC_REG4",
- .supply_name = "vcc4",
- .id = RK808_ID_DCDC4,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 16,
- .linear_ranges = rk808_buck4_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_buck4_voltage_ranges),
- .vsel_reg = RK808_BUCK4_ON_VSEL_REG,
- .vsel_mask = RK808_BUCK4_VSEL_MASK,
- .enable_reg = RK808_DCDC_EN_REG,
- .enable_mask = BIT(3),
- .owner = THIS_MODULE,
- }, {
- .name = "LDO_REG1",
- .supply_name = "vcc6",
- .id = RK808_ID_LDO1,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 17,
- .linear_ranges = rk808_ldo_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
- .vsel_reg = RK808_LDO1_ON_VSEL_REG,
- .vsel_mask = RK808_LDO_VSEL_MASK,
- .enable_reg = RK808_LDO_EN_REG,
- .enable_mask = BIT(0),
- .enable_time = 400,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO_REG2",
- .supply_name = "vcc6",
- .id = RK808_ID_LDO2,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 17,
- .linear_ranges = rk808_ldo_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
- .vsel_reg = RK808_LDO2_ON_VSEL_REG,
- .vsel_mask = RK808_LDO_VSEL_MASK,
- .enable_reg = RK808_LDO_EN_REG,
- .enable_mask = BIT(1),
- .enable_time = 400,
- .owner = THIS_MODULE,
- }, {
+ },
+ RK8XX_DESC(RK808_ID_DCDC4, "DCDC_REG4", "vcc4", 1800, 3300, 100,
+ RK808_BUCK4_ON_VSEL_REG, RK808_BUCK4_VSEL_MASK,
+ RK808_DCDC_EN_REG, BIT(3), 0),
+ RK8XX_DESC(RK808_ID_LDO1, "LDO_REG1", "vcc6", 1800, 3400, 100,
+ RK808_LDO1_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+ BIT(0), 400),
+ RK8XX_DESC(RK808_ID_LDO2, "LDO_REG2", "vcc6", 1800, 3400, 100,
+ RK808_LDO2_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+ BIT(1), 400),
+ {
.name = "LDO_REG3",
.supply_name = "vcc7",
+ .of_match = of_match_ptr("LDO_REG3"),
+ .regulators_node = of_match_ptr("regulators"),
.id = RK808_ID_LDO3,
- .ops = &rk808_reg_ops,
+ .ops = &rk808_reg_ops_ranges,
.type = REGULATOR_VOLTAGE,
.n_voltages = 16,
.linear_ranges = rk808_ldo3_voltage_ranges,
@@ -410,117 +432,26 @@ static const struct regulator_desc rk808_reg[] = {
.enable_mask = BIT(2),
.enable_time = 400,
.owner = THIS_MODULE,
- }, {
- .name = "LDO_REG4",
- .supply_name = "vcc9",
- .id = RK808_ID_LDO4,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 17,
- .linear_ranges = rk808_ldo_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
- .vsel_reg = RK808_LDO4_ON_VSEL_REG,
- .vsel_mask = RK808_LDO_VSEL_MASK,
- .enable_reg = RK808_LDO_EN_REG,
- .enable_mask = BIT(3),
- .enable_time = 400,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO_REG5",
- .supply_name = "vcc9",
- .id = RK808_ID_LDO5,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 17,
- .linear_ranges = rk808_ldo_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
- .vsel_reg = RK808_LDO5_ON_VSEL_REG,
- .vsel_mask = RK808_LDO_VSEL_MASK,
- .enable_reg = RK808_LDO_EN_REG,
- .enable_mask = BIT(4),
- .enable_time = 400,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO_REG6",
- .supply_name = "vcc10",
- .id = RK808_ID_LDO6,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 18,
- .linear_ranges = rk808_ldo6_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_ldo6_voltage_ranges),
- .vsel_reg = RK808_LDO6_ON_VSEL_REG,
- .vsel_mask = RK808_LDO_VSEL_MASK,
- .enable_reg = RK808_LDO_EN_REG,
- .enable_mask = BIT(5),
- .enable_time = 400,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO_REG7",
- .supply_name = "vcc7",
- .id = RK808_ID_LDO7,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 18,
- .linear_ranges = rk808_ldo6_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_ldo6_voltage_ranges),
- .vsel_reg = RK808_LDO7_ON_VSEL_REG,
- .vsel_mask = RK808_LDO_VSEL_MASK,
- .enable_reg = RK808_LDO_EN_REG,
- .enable_mask = BIT(6),
- .enable_time = 400,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO_REG8",
- .supply_name = "vcc11",
- .id = RK808_ID_LDO8,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 17,
- .linear_ranges = rk808_ldo_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
- .vsel_reg = RK808_LDO8_ON_VSEL_REG,
- .vsel_mask = RK808_LDO_VSEL_MASK,
- .enable_reg = RK808_LDO_EN_REG,
- .enable_mask = BIT(7),
- .enable_time = 400,
- .owner = THIS_MODULE,
- }, {
- .name = "SWITCH_REG1",
- .supply_name = "vcc8",
- .id = RK808_ID_SWITCH1,
- .ops = &rk808_switch_ops,
- .type = REGULATOR_VOLTAGE,
- .enable_reg = RK808_DCDC_EN_REG,
- .enable_mask = BIT(5),
- .owner = THIS_MODULE,
- }, {
- .name = "SWITCH_REG2",
- .supply_name = "vcc12",
- .id = RK808_ID_SWITCH2,
- .ops = &rk808_switch_ops,
- .type = REGULATOR_VOLTAGE,
- .enable_reg = RK808_DCDC_EN_REG,
- .enable_mask = BIT(6),
- .owner = THIS_MODULE,
},
-};
-
-static struct of_regulator_match rk808_reg_matches[] = {
- [RK808_ID_DCDC1] = { .name = "DCDC_REG1" },
- [RK808_ID_DCDC2] = { .name = "DCDC_REG2" },
- [RK808_ID_DCDC3] = { .name = "DCDC_REG3" },
- [RK808_ID_DCDC4] = { .name = "DCDC_REG4" },
- [RK808_ID_LDO1] = { .name = "LDO_REG1" },
- [RK808_ID_LDO2] = { .name = "LDO_REG2" },
- [RK808_ID_LDO3] = { .name = "LDO_REG3" },
- [RK808_ID_LDO4] = { .name = "LDO_REG4" },
- [RK808_ID_LDO5] = { .name = "LDO_REG5" },
- [RK808_ID_LDO6] = { .name = "LDO_REG6" },
- [RK808_ID_LDO7] = { .name = "LDO_REG7" },
- [RK808_ID_LDO8] = { .name = "LDO_REG8" },
- [RK808_ID_SWITCH1] = { .name = "SWITCH_REG1" },
- [RK808_ID_SWITCH2] = { .name = "SWITCH_REG2" },
+ RK8XX_DESC(RK808_ID_LDO4, "LDO_REG4", "vcc9", 1800, 3400, 100,
+ RK808_LDO4_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+ BIT(3), 400),
+ RK8XX_DESC(RK808_ID_LDO5, "LDO_REG5", "vcc9", 1800, 3400, 100,
+ RK808_LDO5_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+ BIT(4), 400),
+ RK8XX_DESC(RK808_ID_LDO6, "LDO_REG6", "vcc10", 800, 2500, 100,
+ RK808_LDO6_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+ BIT(5), 400),
+ RK8XX_DESC(RK808_ID_LDO7, "LDO_REG7", "vcc7", 800, 2500, 100,
+ RK808_LDO7_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+ BIT(6), 400),
+ RK8XX_DESC(RK808_ID_LDO8, "LDO_REG8", "vcc11", 1800, 3400, 100,
+ RK808_LDO8_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+ BIT(7), 400),
+ RK8XX_DESC_SWITCH(RK808_ID_SWITCH1, "SWITCH_REG1", "vcc8",
+ RK808_DCDC_EN_REG, BIT(5)),
+ RK8XX_DESC_SWITCH(RK808_ID_SWITCH2, "SWITCH_REG2", "vcc12",
+ RK808_DCDC_EN_REG, BIT(6)),
};
static int rk808_regulator_dt_parse_pdata(struct device *dev,
@@ -529,17 +460,12 @@ static int rk808_regulator_dt_parse_pdata(struct device *dev,
struct rk808_regulator_data *pdata)
{
struct device_node *np;
- int tmp, ret, i;
+ int tmp, ret = 0, i;
np = of_get_child_by_name(client_dev->of_node, "regulators");
if (!np)
return -ENXIO;
- ret = of_regulator_match(dev, np, rk808_reg_matches,
- RK808_NUM_REGULATORS);
- if (ret < 0)
- goto dt_parse_end;
-
for (i = 0; i < ARRAY_SIZE(pdata->dvs_gpio); i++) {
pdata->dvs_gpio[i] =
devm_gpiod_get_index_optional(client_dev, "dvs", i,
@@ -586,18 +512,12 @@ static int rk808_regulator_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pdata);
+ config.dev = &client->dev;
+ config.driver_data = pdata;
+ config.regmap = rk808->regmap;
+
/* Instantiate the regulators */
for (i = 0; i < RK808_NUM_REGULATORS; i++) {
- if (!rk808_reg_matches[i].init_data ||
- !rk808_reg_matches[i].of_node)
- continue;
-
- config.dev = &client->dev;
- config.driver_data = pdata;
- config.regmap = rk808->regmap;
- config.of_node = rk808_reg_matches[i].of_node;
- config.init_data = rk808_reg_matches[i].init_data;
-
rk808_rdev = devm_regulator_register(&pdev->dev,
&rk808_reg[i], &config);
if (IS_ERR(rk808_rdev)) {
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 6dfa3502e1f1..02fb6b4ea820 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -267,6 +267,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.ops = &s2mps11_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
+ .ramp_delay = RAMP_DELAY_12_MVUS, \
.min_uV = MIN_800_MV, \
.uV_step = step, \
.n_voltages = S2MPS11_LDO_N_VOLTAGES, \
@@ -1237,17 +1238,7 @@ static struct platform_driver s2mps11_pmic_driver = {
.id_table = s2mps11_pmic_id,
};
-static int __init s2mps11_pmic_init(void)
-{
- return platform_driver_register(&s2mps11_pmic_driver);
-}
-subsys_initcall(s2mps11_pmic_init);
-
-static void __exit s2mps11_pmic_exit(void)
-{
- platform_driver_unregister(&s2mps11_pmic_driver);
-}
-module_exit(s2mps11_pmic_exit);
+module_platform_driver(s2mps11_pmic_driver);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 9d6ea3a4dccd..67cac2682f50 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -600,7 +600,7 @@ static int pmic_probe(struct spi_device *spi)
memset(hw, 0, sizeof(struct tps6524x));
hw->dev = dev;
- hw->spi = spi_dev_get(spi);
+ hw->spi = spi;
mutex_init(&hw->lock);
for (i = 0; i < N_REGULATORS; i++, info++, init_data++) {
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 955a6fb1355c..faeb5ee92c9e 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -21,7 +21,7 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/i2c/twl.h>
-
+#include <linux/delay.h>
/*
* The TWL4030/TW5030/TPS659x0/TWL6030 family chips include power management, a
@@ -188,6 +188,74 @@ static int twl6030reg_is_enabled(struct regulator_dev *rdev)
return grp && (val == TWL6030_CFG_STATE_ON);
}
+#define PB_I2C_BUSY BIT(0)
+#define PB_I2C_BWEN BIT(1)
+
+/* Wait until buffer empty/ready to send a word on power bus. */
+static int twl4030_wait_pb_ready(void)
+{
+
+ int ret;
+ int timeout = 10;
+ u8 val;
+
+ do {
+ ret = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &val,
+ TWL4030_PM_MASTER_PB_CFG);
+ if (ret < 0)
+ return ret;
+
+ if (!(val & PB_I2C_BUSY))
+ return 0;
+
+ mdelay(1);
+ timeout--;
+ } while (timeout);
+
+ return -ETIMEDOUT;
+}
+
+/* Send a word over the powerbus */
+static int twl4030_send_pb_msg(unsigned msg)
+{
+ u8 val;
+ int ret;
+
+ /* save powerbus configuration */
+ ret = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &val,
+ TWL4030_PM_MASTER_PB_CFG);
+ if (ret < 0)
+ return ret;
+
+ /* Enable i2c access to powerbus */
+ ret = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, val | PB_I2C_BWEN,
+ TWL4030_PM_MASTER_PB_CFG);
+ if (ret < 0)
+ return ret;
+
+ ret = twl4030_wait_pb_ready();
+ if (ret < 0)
+ return ret;
+
+ ret = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, msg >> 8,
+ TWL4030_PM_MASTER_PB_WORD_MSB);
+ if (ret < 0)
+ return ret;
+
+ ret = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, msg & 0xff,
+ TWL4030_PM_MASTER_PB_WORD_LSB);
+ if (ret < 0)
+ return ret;
+
+ ret = twl4030_wait_pb_ready();
+ if (ret < 0)
+ return ret;
+
+ /* Restore powerbus configuration */
+ return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, val,
+ TWL4030_PM_MASTER_PB_CFG);
+}
+
static int twl4030reg_enable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -303,7 +371,6 @@ static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
unsigned message;
- int status;
/* We can only set the mode through state machine commands... */
switch (mode) {
@@ -317,20 +384,19 @@ static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
return -EINVAL;
}
- /* Ensure the resource is associated with some group */
- status = twlreg_grp(rdev);
- if (status < 0)
- return status;
- if (!(status & (P3_GRP_4030 | P2_GRP_4030 | P1_GRP_4030)))
- return -EACCES;
-
- status = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
- message >> 8, TWL4030_PM_MASTER_PB_WORD_MSB);
- if (status < 0)
- return status;
+ return twl4030_send_pb_msg(message);
+}
- return twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
- message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB);
+static inline unsigned int twl4030reg_map_mode(unsigned int mode)
+{
+ switch (mode) {
+ case RES_STATE_ACTIVE:
+ return REGULATOR_MODE_NORMAL;
+ case RES_STATE_SLEEP:
+ return REGULATOR_MODE_STANDBY;
+ default:
+ return -EINVAL;
+ }
}
static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
@@ -835,10 +901,11 @@ static struct regulator_ops twlsmps_ops = {
#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf) \
TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
- remap_conf, TWL4030, twl4030fixed_ops)
+ remap_conf, TWL4030, twl4030fixed_ops, \
+ twl4030reg_map_mode)
#define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \
TWL_FIXED_LDO(label, offset, mVolts, 0x0, turnon_delay, \
- 0x0, TWL6030, twl6030fixed_ops)
+ 0x0, TWL6030, twl6030fixed_ops, 0x0)
#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \
static const struct twlreg_info TWL4030_INFO_##label = { \
@@ -855,6 +922,7 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.enable_time = turnon_delay, \
+ .of_map_mode = twl4030reg_map_mode, \
}, \
}
@@ -870,6 +938,7 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.enable_time = turnon_delay, \
+ .of_map_mode = twl4030reg_map_mode, \
}, \
}
@@ -915,7 +984,7 @@ static const struct twlreg_info TWL6032_INFO_##label = { \
}
#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
- family, operations) \
+ family, operations, map_mode) \
static const struct twlreg_info TWLFIXED_INFO_##label = { \
.base = offset, \
.id = num, \
@@ -930,6 +999,7 @@ static const struct twlreg_info TWLFIXED_INFO_##label = { \
.owner = THIS_MODULE, \
.min_uV = mVolts * 1000, \
.enable_time = turnon_delay, \
+ .of_map_mode = map_mode, \
}, \
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index f52b74cf8d1e..428c03ef02b2 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -137,15 +137,15 @@ static const char *sd_cache_types[] = {
static void sd_set_flush_flag(struct scsi_disk *sdkp)
{
- unsigned flush = 0;
+ bool wc = false, fua = false;
if (sdkp->WCE) {
- flush |= REQ_FLUSH;
+ wc = true;
if (sdkp->DPOFUA)
- flush |= REQ_FUA;
+ fua = true;
}
- blk_queue_flush(sdkp->disk->queue, flush);
+ blk_queue_write_cache(sdkp->disk->queue, wc, fua);
}
static ssize_t
@@ -779,7 +779,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
* discarded on disk. This allows us to report completion on the full
* amount of blocks described by the request.
*/
- blk_add_request_payload(rq, page, len);
+ blk_add_request_payload(rq, page, 0, len);
ret = scsi_init_io(cmd);
rq->__data_len = nr_bytes;
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
index f324451e0940..f9d7a85b2822 100644
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/soc/qcom/spm.c
@@ -275,7 +275,7 @@ check_spm:
return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO;
}
-static struct cpuidle_ops qcom_cpuidle_ops __initdata = {
+static const struct cpuidle_ops qcom_cpuidle_ops __initconst = {
.suspend = qcom_idle_enter,
.init = qcom_cpuidle_init,
};
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index e4c82883e580..7a0a67f3c4e7 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -1865,7 +1865,6 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
int api32 = ll_need_32bit_api(sbi);
loff_t ret = -EINVAL;
- inode_lock(inode);
switch (origin) {
case SEEK_SET:
break;
@@ -1903,7 +1902,6 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
goto out;
out:
- inode_unlock(inode);
return ret;
}
@@ -1922,7 +1920,7 @@ const struct file_operations ll_dir_operations = {
.open = ll_dir_open,
.release = ll_dir_release,
.read = generic_read_dir,
- .iterate = ll_readdir,
+ .iterate_shared = ll_readdir,
.unlocked_ioctl = ll_dir_ioctl,
.fsync = ll_fsync,
};
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index e3c0f1dd4d31..65a6acec663b 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -1042,8 +1042,8 @@ static inline __u64 ll_file_maxbytes(struct inode *inode)
/* llite/xattr.c */
int ll_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
-ssize_t ll_getxattr(struct dentry *dentry, const char *name,
- void *buffer, size_t size);
+ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size);
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
int ll_removexattr(struct dentry *dentry, const char *name);
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 69aa15e8e3ef..0c3459c1a518 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -358,14 +358,14 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
*/
#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \
PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
-static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
- loff_t file_offset)
+static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
{
struct lu_env *env;
struct cl_io *io;
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct ccc_object *obj = cl_inode2ccc(inode);
+ loff_t file_offset = iocb->ki_pos;
ssize_t count = iov_iter_count(iter);
ssize_t tot_bytes = 0, result = 0;
struct ll_inode_info *lli = ll_i2info(inode);
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index b68dcc921ca2..c671f221c28c 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -451,11 +451,9 @@ out:
return rc;
}
-ssize_t ll_getxattr(struct dentry *dentry, const char *name,
- void *buffer, size_t size)
+ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size)
{
- struct inode *inode = d_inode(dentry);
-
LASSERT(inode);
LASSERT(name);
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 5fbda7b218c7..9cf4f8463c4e 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -2425,7 +2425,7 @@ static __init uint32_t visorutil_spar_detect(void)
{
unsigned int eax, ebx, ecx, edx;
- if (cpu_has_hypervisor) {
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
/* check the ID */
cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
return (ebx == UNISYS_SPAR_ID_EBX) &&
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 026a758e5778..7c4efb4417b0 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -687,10 +687,10 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
* Force writethrough using WRITE_FUA if a volatile write cache
* is not enabled, or if initiator set the Force Unit Access bit.
*/
- if (q->flush_flags & REQ_FUA) {
+ if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
if (cmd->se_cmd_flags & SCF_FUA)
rw = WRITE_FUA;
- else if (!(q->flush_flags & REQ_FLUSH))
+ else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
rw = WRITE_FUA;
else
rw = WRITE;
@@ -836,7 +836,7 @@ static bool iblock_get_write_cache(struct se_device *dev)
struct block_device *bd = ib_dev->ibd_bd;
struct request_queue *q = bdev_get_queue(bd);
- return q->flush_flags & REQ_FLUSH;
+ return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
}
static const struct target_backend_ops iblock_ops = {
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 3c3dc4a3d52c..d89d60c8b6cf 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -338,31 +338,9 @@ config INTEL_QUARK_DTS_THERMAL
hot & critical. The critical trip point default value is set by
underlying BIOS/Firmware.
-config INT340X_THERMAL
- tristate "ACPI INT340X thermal drivers"
- depends on X86 && ACPI
- select THERMAL_GOV_USER_SPACE
- select ACPI_THERMAL_REL
- select ACPI_FAN
- select INTEL_SOC_DTS_IOSF_CORE
- select THERMAL_WRITABLE_TRIPS
- help
- Newer laptops and tablets that use ACPI may have thermal sensors and
- other devices with thermal control capabilities outside the core
- CPU/SOC, for thermal safety reasons.
- They are exposed for the OS to use via the INT3400 ACPI device object
- as the master, and INT3401~INT340B ACPI device objects as the slaves.
- Enable this to expose the temperature information and cooling ability
- from these objects to userspace via the normal thermal framework.
- This means that a wide range of applications and GUI widgets can show
- the information to the user or use this information for making
- decisions. For example, the Intel Thermal Daemon can use this
- information to allow the user to select his laptop to run without
- turning on the fans.
-
-config ACPI_THERMAL_REL
- tristate
- depends on ACPI
+menu "ACPI INT340X thermal drivers"
+source drivers/thermal/int340x_thermal/Kconfig
+endmenu
config INTEL_PCH_THERMAL
tristate "Intel PCH Thermal Reporting Driver"
diff --git a/drivers/thermal/int340x_thermal/Kconfig b/drivers/thermal/int340x_thermal/Kconfig
new file mode 100644
index 000000000000..0582bd12a239
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/Kconfig
@@ -0,0 +1,42 @@
+#
+# ACPI INT340x thermal drivers configuration
+#
+
+config INT340X_THERMAL
+ tristate "ACPI INT340X thermal drivers"
+ depends on X86 && ACPI
+ select THERMAL_GOV_USER_SPACE
+ select ACPI_THERMAL_REL
+ select ACPI_FAN
+ select INTEL_SOC_DTS_IOSF_CORE
+ help
+ Newer laptops and tablets that use ACPI may have thermal sensors and
+ other devices with thermal control capabilities outside the core
+ CPU/SOC, for thermal safety reasons.
+ They are exposed for the OS to use via the INT3400 ACPI device object
+ as the master, and INT3401~INT340B ACPI device objects as the slaves.
+ Enable this to expose the temperature information and cooling ability
+ from these objects to userspace via the normal thermal framework.
+ This means that a wide range of applications and GUI widgets can show
+ the information to the user or use this information for making
+ decisions. For example, the Intel Thermal Daemon can use this
+ information to allow the user to select his laptop to run without
+ turning on the fans.
+
+config ACPI_THERMAL_REL
+ tristate
+ depends on ACPI
+
+if INT340X_THERMAL
+
+config INT3406_THERMAL
+ tristate "ACPI INT3406 display thermal driver"
+ depends on ACPI_VIDEO
+ help
+ The display thermal device represents the LED/LCD display panel
+ that may or may not include touch support. The main function of
+ the display thermal device is to allow control of the display
+ brightness in order to address a thermal condition or to reduce
+ power consumed by the display device.
+
+endif
diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/int340x_thermal/Makefile
index ba77a34f659f..df0df055e7ff 100644
--- a/drivers/thermal/int340x_thermal/Makefile
+++ b/drivers/thermal/int340x_thermal/Makefile
@@ -3,4 +3,5 @@ obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal_zone.o
obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device.o
+obj-$(CONFIG_INT3406_THERMAL) += int3406_thermal.o
obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c
new file mode 100644
index 000000000000..13d431cbd29e
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/int3406_thermal.c
@@ -0,0 +1,236 @@
+/*
+ * INT3406 thermal driver for display participant device
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Authors: Aaron Lu <aaron.lu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/thermal.h>
+#include <acpi/video.h>
+
+#define INT3406_BRIGHTNESS_LIMITS_CHANGED 0x80
+
+struct int3406_thermal_data {
+ int upper_limit;
+ int upper_limit_index;
+ int lower_limit;
+ int lower_limit_index;
+ acpi_handle handle;
+ struct acpi_video_device_brightness *br;
+ struct backlight_device *raw_bd;
+ struct thermal_cooling_device *cooling_dev;
+};
+
+static int int3406_thermal_to_raw(int level, struct int3406_thermal_data *d)
+{
+ int max_level = d->br->levels[d->br->count - 1];
+ int raw_max = d->raw_bd->props.max_brightness;
+
+ return level * raw_max / max_level;
+}
+
+static int int3406_thermal_to_acpi(int level, struct int3406_thermal_data *d)
+{
+ int raw_max = d->raw_bd->props.max_brightness;
+ int max_level = d->br->levels[d->br->count - 1];
+
+ return level * max_level / raw_max;
+}
+
+static int
+int3406_thermal_get_max_state(struct thermal_cooling_device *cooling_dev,
+ unsigned long *state)
+{
+ struct int3406_thermal_data *d = cooling_dev->devdata;
+ int index = d->lower_limit_index ? d->lower_limit_index : 2;
+
+ *state = d->br->count - 1 - index;
+ return 0;
+}
+
+static int
+int3406_thermal_set_cur_state(struct thermal_cooling_device *cooling_dev,
+ unsigned long state)
+{
+ struct int3406_thermal_data *d = cooling_dev->devdata;
+ int level, raw_level;
+
+ if (state > d->br->count - 3)
+ return -EINVAL;
+
+ state = d->br->count - 1 - state;
+ level = d->br->levels[state];
+
+ if ((d->upper_limit && level > d->upper_limit) ||
+ (d->lower_limit && level < d->lower_limit))
+ return -EINVAL;
+
+ raw_level = int3406_thermal_to_raw(level, d);
+ return backlight_device_set_brightness(d->raw_bd, raw_level);
+}
+
+static int
+int3406_thermal_get_cur_state(struct thermal_cooling_device *cooling_dev,
+ unsigned long *state)
+{
+ struct int3406_thermal_data *d = cooling_dev->devdata;
+ int raw_level, level, i;
+ int *levels = d->br->levels;
+
+ raw_level = d->raw_bd->props.brightness;
+ level = int3406_thermal_to_acpi(raw_level, d);
+
+ /*
+ * There is no 1:1 mapping between the firmware interface level and the
+ * raw interface level, so we have to find one that is close enough.
+ */
+ for (i = 2; i < d->br->count; i++) {
+ if (level < levels[i]) {
+ if (i == 2)
+ break;
+ if ((level - levels[i - 1]) < (levels[i] - level))
+ i--;
+ break;
+ }
+ }
+
+ *state = d->br->count - 1 - i;
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops video_cooling_ops = {
+ .get_max_state = int3406_thermal_get_max_state,
+ .get_cur_state = int3406_thermal_get_cur_state,
+ .set_cur_state = int3406_thermal_set_cur_state,
+};
+
+static int int3406_thermal_get_index(int *array, int nr, int value)
+{
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ if (array[i] == value)
+ break;
+ }
+ return i == nr ? -ENOENT : i;
+}
+
+static void int3406_thermal_get_limit(struct int3406_thermal_data *d)
+{
+ acpi_status status;
+ unsigned long long lower_limit, upper_limit;
+ int index;
+
+ status = acpi_evaluate_integer(d->handle, "DDDL", NULL, &lower_limit);
+ if (ACPI_SUCCESS(status)) {
+ index = int3406_thermal_get_index(d->br->levels, d->br->count,
+ lower_limit);
+ if (index > 0) {
+ d->lower_limit = (int)lower_limit;
+ d->lower_limit_index = index;
+ }
+ }
+
+ status = acpi_evaluate_integer(d->handle, "DDPC", NULL, &upper_limit);
+ if (ACPI_SUCCESS(status)) {
+ index = int3406_thermal_get_index(d->br->levels, d->br->count,
+ upper_limit);
+ if (index > 0) {
+ d->upper_limit = (int)upper_limit;
+ d->upper_limit_index = index;
+ }
+ }
+}
+
+static void int3406_notify(acpi_handle handle, u32 event, void *data)
+{
+ if (event == INT3406_BRIGHTNESS_LIMITS_CHANGED)
+ int3406_thermal_get_limit(data);
+}
+
+static int int3406_thermal_probe(struct platform_device *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct int3406_thermal_data *d;
+ struct backlight_device *bd;
+ int ret;
+
+ if (!ACPI_HANDLE(&pdev->dev))
+ return -ENODEV;
+
+ d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+ d->handle = ACPI_HANDLE(&pdev->dev);
+
+ bd = backlight_device_get_by_type(BACKLIGHT_RAW);
+ if (!bd)
+ return -ENODEV;
+ d->raw_bd = bd;
+
+ ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br);
+ if (ret)
+ return ret;
+
+ int3406_thermal_get_limit(d);
+
+ d->cooling_dev = thermal_cooling_device_register(acpi_device_bid(adev),
+ d, &video_cooling_ops);
+ if (IS_ERR(d->cooling_dev))
+ goto err;
+
+ ret = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
+ int3406_notify, d);
+ if (ret)
+ goto err_cdev;
+
+ platform_set_drvdata(pdev, d);
+
+ return 0;
+
+err_cdev:
+ thermal_cooling_device_unregister(d->cooling_dev);
+err:
+ kfree(d->br);
+ return -ENODEV;
+}
+
+static int int3406_thermal_remove(struct platform_device *pdev)
+{
+ struct int3406_thermal_data *d = platform_get_drvdata(pdev);
+
+ thermal_cooling_device_unregister(d->cooling_dev);
+ kfree(d->br);
+ return 0;
+}
+
+static const struct acpi_device_id int3406_thermal_match[] = {
+ {"INT3406", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, int3406_thermal_match);
+
+static struct platform_driver int3406_thermal_driver = {
+ .probe = int3406_thermal_probe,
+ .remove = int3406_thermal_remove,
+ .driver = {
+ .name = "int3406 thermal",
+ .owner = THIS_MODULE,
+ .acpi_match_table = int3406_thermal_match,
+ },
+};
+
+module_platform_driver(int3406_thermal_driver);
+
+MODULE_DESCRIPTION("INT3406 Thermal driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index bddc8b17a4d8..288318ad21dd 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -164,18 +164,10 @@ static ssize_t brightness_show(struct device *dev,
return sprintf(buf, "%d\n", bd->props.brightness);
}
-static ssize_t brightness_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+int backlight_device_set_brightness(struct backlight_device *bd,
+ unsigned long brightness)
{
- int rc;
- struct backlight_device *bd = to_backlight_device(dev);
- unsigned long brightness;
-
- rc = kstrtoul(buf, 0, &brightness);
- if (rc)
- return rc;
-
- rc = -ENXIO;
+ int rc = -ENXIO;
mutex_lock(&bd->ops_lock);
if (bd->ops) {
@@ -185,7 +177,7 @@ static ssize_t brightness_store(struct device *dev,
pr_debug("set brightness to %lu\n", brightness);
bd->props.brightness = brightness;
backlight_update_status(bd);
- rc = count;
+ rc = 0;
}
}
mutex_unlock(&bd->ops_lock);
@@ -194,6 +186,23 @@ static ssize_t brightness_store(struct device *dev,
return rc;
}
+EXPORT_SYMBOL(backlight_device_set_brightness);
+
+static ssize_t brightness_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int rc;
+ struct backlight_device *bd = to_backlight_device(dev);
+ unsigned long brightness;
+
+ rc = kstrtoul(buf, 0, &brightness);
+ if (rc)
+ return rc;
+
+ rc = backlight_device_set_brightness(bd, brightness);
+
+ return rc ? rc : count;
+}
static DEVICE_ATTR_RW(brightness);
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@@ -380,7 +389,7 @@ struct backlight_device *backlight_device_register(const char *name,
}
EXPORT_SYMBOL(backlight_device_register);
-bool backlight_device_registered(enum backlight_type type)
+struct backlight_device *backlight_device_get_by_type(enum backlight_type type)
{
bool found = false;
struct backlight_device *bd;
@@ -394,9 +403,9 @@ bool backlight_device_registered(enum backlight_type type)
}
mutex_unlock(&backlight_dev_list_mutex);
- return found;
+ return found ? bd : NULL;
}
-EXPORT_SYMBOL(backlight_device_registered);
+EXPORT_SYMBOL(backlight_device_get_by_type);
/**
* backlight_device_unregister - unregisters a backlight device object.
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 983280e8d93f..e5a391aecde1 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -761,7 +761,7 @@ config FB_VESA
config FB_EFI
bool "EFI-based Framebuffer Support"
- depends on (FB = y) && X86 && EFI
+ depends on (FB = y) && !IA64 && EFI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 95d293b7445a..f4c045c0051c 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -6,16 +6,14 @@
*
*/
-#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/efi.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
-#include <linux/dmi.h>
-#include <linux/pci.h>
#include <video/vga.h>
-#include <asm/sysfb.h>
+#include <asm/efi.h>
static bool request_mem_succeeded = false;
@@ -85,21 +83,13 @@ static struct fb_ops efifb_ops = {
static int efifb_setup(char *options)
{
char *this_opt;
- int i;
if (options && *options) {
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt) continue;
- for (i = 0; i < M_UNKNOWN; i++) {
- if (efifb_dmi_list[i].base != 0 &&
- !strcmp(this_opt, efifb_dmi_list[i].optname)) {
- screen_info.lfb_base = efifb_dmi_list[i].base;
- screen_info.lfb_linelength = efifb_dmi_list[i].stride;
- screen_info.lfb_width = efifb_dmi_list[i].width;
- screen_info.lfb_height = efifb_dmi_list[i].height;
- }
- }
+ efifb_setup_from_dmi(&screen_info, this_opt);
+
if (!strncmp(this_opt, "base:", 5))
screen_info.lfb_base = simple_strtoul(this_opt+5, NULL, 0);
else if (!strncmp(this_opt, "stride:", 7))
@@ -338,5 +328,4 @@ static struct platform_driver efifb_driver = {
.remove = efifb_remove,
};
-module_platform_driver(efifb_driver);
-MODULE_LICENSE("GPL");
+builtin_platform_driver(efifb_driver);
diff --git a/drivers/xen/efi.c b/drivers/xen/efi.c
index be7e56a338e8..e9d2135445c1 100644
--- a/drivers/xen/efi.c
+++ b/drivers/xen/efi.c
@@ -316,7 +316,6 @@ static const struct efi efi_xen __initconst = {
.get_next_high_mono_count = xen_efi_get_next_high_mono_count,
.reset_system = NULL, /* Functionality provided by Xen. */
.set_virtual_address_map = NULL, /* Not used under Xen. */
- .memmap = NULL, /* Not used under Xen. */
.flags = 0 /* Initialized later. */
};